Dataset columns:
- code: string (lengths 87 to 55.2k)
- code_codestyle: int64 (values 0 to 349)
- style_context: string (lengths 135 to 49.1k)
- style_context_codestyle: int64 (values 0 to 349)
- label: int64 (values 0 to 1)
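These columns suggest paired code snippets, a per-snippet style id, and a binary label. As a minimal sketch of how rows with this schema could be inspected (the repository id "user/python-codestyle", the "train" split, and the row index below are placeholders, not taken from this page), the datasets library could be used:

# Hypothetical example: load a dataset exposing the columns listed above and look at one row.
from datasets import load_dataset

ds = load_dataset("user/python-codestyle", split="train")  # placeholder repo id and split

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # code snippet length and its style id
print(len(row["style_context"]), row["style_context_codestyle"])  # context snippet length and its style id
print(row["label"])                                               # binary label, 0 or 1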
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class UpperCamelCase__ : '''simple docstring''' def __init__( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int]=13 ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : List[str]=99 ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : Union[str, Any]=5 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : Any=37 ,lowerCamelCase__ : Union[str, Any]="gelu" ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : str=0.1 ,lowerCamelCase__ : List[Any]=512 ,lowerCamelCase__ : int=16 ,lowerCamelCase__ : Optional[Any]=2 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : Any=3 ,lowerCamelCase__ : List[Any]=4 ,lowerCamelCase__ : Union[str, Any]=None ,) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = self.vocab_size - 1 def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices ) SCREAMING_SNAKE_CASE = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) SCREAMING_SNAKE_CASE = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any] ,*lowerCamelCase__ : Tuple ) -> Optional[Any]: 
'''simple docstring''' SCREAMING_SNAKE_CASE = OpenAIGPTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,head_mask=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : int ,lowerCamelCase__ : str ,*lowerCamelCase__ : Any ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = OpenAIGPTLMHeadModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : str ,*lowerCamelCase__ : str ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = OpenAIGPTDoubleHeadsModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,*lowerCamelCase__ : Any ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = OpenAIGPTForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ) = config_and_inputs SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Union[str, Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __snake_case : Union[str, Any] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __snake_case : Optional[int] = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] 
,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any]=False ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = super()._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = inputs_dict["""labels"""] SCREAMING_SNAKE_CASE = inputs_dict["""labels"""] SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase__ ) return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = OpenAIGPTModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,n_embd=37 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCamelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = OpenAIGPTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=lowerCamelCase__ ) # the president is SCREAMING_SNAKE_CASE = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. 
" \n " i\'m sure he is, " said the SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase__ ,do_sample=lowerCamelCase__ ) self.assertListEqual(output_ids[0].tolist() ,lowerCamelCase__ )
code_codestyle: 296
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True when the filesystem is not the local one."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    """Rename a path on the given filesystem, moving local directories with shutil."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Reset fsspec's event-loop references and lock (e.g. after forking)."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
style_context_codestyle: 296
label: 1
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
code_codestyle: 296
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.dummy_uncond_unet SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0] SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256""" SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
style_context_codestyle: 296
label: 1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __snake_case : Tuple = "focalnet" def __init__( self : Union[str, Any] ,lowerCamelCase__ : Union[str, Any]=224 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Any=96 ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : Optional[int]=[192, 384, 768, 768] ,lowerCamelCase__ : Optional[Any]=[2, 2, 6, 2] ,lowerCamelCase__ : Optional[Any]=[2, 2, 2, 2] ,lowerCamelCase__ : Optional[Any]=[3, 3, 3, 3] ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : List[Any]=4.0 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=1e-4 ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : int=False ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : Any=1e-5 ,lowerCamelCase__ : List[Any]=32 ,lowerCamelCase__ : List[str]=None ,lowerCamelCase__ : Any=None ,**lowerCamelCase__ : Any ,) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = use_conv_embed SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = focal_levels SCREAMING_SNAKE_CASE = focal_windows SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = use_layerscale SCREAMING_SNAKE_CASE = layerscale_value SCREAMING_SNAKE_CASE = use_post_layernorm SCREAMING_SNAKE_CASE = use_post_layernorm_in_modulation SCREAMING_SNAKE_CASE = normalize_modulator SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = encoder_stride SCREAMING_SNAKE_CASE = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ ,out_indices=lowerCamelCase__ ,stage_names=self.stage_names )
code_codestyle: 296
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]: '''simple docstring''' super().__init__( features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = Generator( cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,) SCREAMING_SNAKE_CASE = self.builder.as_dataset( split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory ) return dataset
style_context_codestyle: 296
label: 1
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch SCREAMING_SNAKE_CASE_ = random.Random() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Any: '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any]=7 ,lowerCamelCase__ : Optional[int]=400 ,lowerCamelCase__ : Tuple=2000 ,lowerCamelCase__ : Any=10 ,lowerCamelCase__ : Tuple=160 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=0.0 ,lowerCamelCase__ : str=4000 ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : Tuple=True ,) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = padding_value SCREAMING_SNAKE_CASE = sampling_rate SCREAMING_SNAKE_CASE = return_attention_mask SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = hop_length def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : int=False ,lowerCamelCase__ : Optional[Any]=False ) -> str: '''simple docstring''' def _flatten(lowerCamelCase__ : List[Any] ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Any = WhisperFeatureExtractor if is_speech_available() else None def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = WhisperFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with 
tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = feat_extract_first.mel_filters SCREAMING_SNAKE_CASE = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = feat_extract_first.mel_filters SCREAMING_SNAKE_CASE = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,padding="""max_length""" ,return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input SCREAMING_SNAKE_CASE = feature_extractor(speech_inputs[0] ,return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ).input_features self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) ) # Test truncation required SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] SCREAMING_SNAKE_CASE = [x[: feature_extractor.n_samples] for x in speech_inputs] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated] SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' import torch SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE = np.random.rand(100 ,32 ).astype(np.floataa ) SCREAMING_SNAKE_CASE = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = WhisperFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape ,(1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] ,lowerCamelCase__ ,atol=1e-4 ) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE = self._load_datasamples(1 )[0] SCREAMING_SNAKE_CASE = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue SCREAMING_SNAKE_CASE = feat_extract.zero_mean_unit_var_norm([audio] 
,attention_mask=lowerCamelCase__ )[0] self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
code_codestyle: 296
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) __snake_case : Optional[str] = field( default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
style_context_codestyle: 296
label: 1
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Dict ,lowerCamelCase__ : CLIPSegForImageSegmentation ,lowerCamelCase__ : CLIPSegProcessor ,lowerCamelCase__ : AutoencoderKL ,lowerCamelCase__ : CLIPTextModel ,lowerCamelCase__ : CLIPTokenizer ,lowerCamelCase__ : UNetaDConditionModel ,lowerCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,lowerCamelCase__ : StableDiffusionSafetyChecker ,lowerCamelCase__ : CLIPImageProcessor ,) -> Union[str, Any]: '''simple docstring''' super().__init__() if hasattr(scheduler.config ,"""steps_offset""" ) and scheduler.config.steps_offset != 1: SCREAMING_SNAKE_CASE = ( F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ """to update the config accordingly as leaving `steps_offset` might led to incorrect results""" """ in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,""" """ it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`""" """ file""" ) deprecate("""steps_offset!=1""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = dict(scheduler.config ) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = FrozenDict(lowerCamelCase__ ) if hasattr(scheduler.config ,"""skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False: SCREAMING_SNAKE_CASE = ( F"""The configuration file of this scheduler: {scheduler} has not set the configuration""" """ `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make""" """ sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to""" """ incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face""" """ Hub, it would be very nice if you could open a Pull request for the""" """ `scheduler/scheduler_config.json` file""" ) deprecate("""skip_prk_steps not set""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = dict(scheduler.config ) SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = FrozenDict(lowerCamelCase__ ) if safety_checker is None: logger.warning( F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. 
Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( segmentation_model=lowerCamelCase__ ,segmentation_processor=lowerCamelCase__ ,vae=lowerCamelCase__ ,text_encoder=lowerCamelCase__ ,tokenizer=lowerCamelCase__ ,unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: '''simple docstring''' self.enable_attention_slicing(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE = torch.device("""cuda""" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase__ ,lowerCamelCase__ ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: '''simple docstring''' if self.device != torch.device("""meta""" ) or not hasattr(self.unet ,"""_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase__ ,"""_hf_hook""" ) and hasattr(module._hf_hook ,"""execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : Tuple ,lowerCamelCase__ : Union[str, List[str]] ,lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] ,lowerCamelCase__ : str ,lowerCamelCase__ : int = 512 ,lowerCamelCase__ : int = 512 ,lowerCamelCase__ : int = 50 ,lowerCamelCase__ : float = 7.5 ,lowerCamelCase__ : Optional[Union[str, List[str]]] = None ,lowerCamelCase__ : Optional[int] = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : Optional[torch.Generator] = None ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[str] = "pil" ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowerCamelCase__ : int = 1 ,**lowerCamelCase__ : Optional[int] ,) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.segmentation_processor( text=[text] ,images=[image] ,padding="""max_length""" ,return_tensors="""pt""" ).to(self.device ) SCREAMING_SNAKE_CASE = self.segmentation_model(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() SCREAMING_SNAKE_CASE = self.numpy_to_pil(lowerCamelCase__ )[0].resize(image.size ) # Run inpainting pipeline with the generated mask SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline( vae=self.vae ,text_encoder=self.text_encoder ,tokenizer=self.tokenizer 
,unet=self.unet ,scheduler=self.scheduler ,safety_checker=self.safety_checker ,feature_extractor=self.feature_extractor ,) return inpainting_pipeline( prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,height=lowerCamelCase__ ,width=lowerCamelCase__ ,num_inference_steps=lowerCamelCase__ ,guidance_scale=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ ,eta=lowerCamelCase__ ,generator=lowerCamelCase__ ,latents=lowerCamelCase__ ,output_type=lowerCamelCase__ ,return_dict=lowerCamelCase__ ,callback=lowerCamelCase__ ,callback_steps=lowerCamelCase__ ,)
code_codestyle: 296
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[str] = TextToVideoSDPipeline __snake_case : int = TEXT_TO_IMAGE_PARAMS __snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __snake_case : Dict = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,) SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]: '''simple docstring''' if str(lowerCamelCase__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) 
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """np""" SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
style_context_codestyle: 296
label: 1
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count perimeters up to `limit` formed by exactly one integer-sided
    right triangle, generating primitive triples with Euclid's formula."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 296
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
style_context_codestyle: 296
label: 1
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : '''simple docstring''' def __init__( self : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int]=14 ,lowerCamelCase__ : str=7 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Any=99 ,lowerCamelCase__ : Tuple=32 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : str=4 ,lowerCamelCase__ : List[Any]=37 ,lowerCamelCase__ : Optional[int]="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : List[Any]=512 ,lowerCamelCase__ : List[str]=0.02 ,) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = rotary_dim SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = vocab_size - 1 SCREAMING_SNAKE_CASE = vocab_size - 1 SCREAMING_SNAKE_CASE = vocab_size - 1 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE = GPTJConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,use_cache=lowerCamelCase__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,rotary_dim=self.rotary_dim ,) return (config, input_ids, input_mask) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 20 
SCREAMING_SNAKE_CASE = model_class_name(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model.init_cache(input_ids.shape[0] ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = jnp.ones((input_ids.shape[0], max_decoder_length) ,dtype="""i4""" ) SCREAMING_SNAKE_CASE = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE = model( input_ids[:, :-1] ,attention_mask=lowerCamelCase__ ,past_key_values=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype="""i4""" ) SCREAMING_SNAKE_CASE = model( input_ids[:, -1:] ,attention_mask=lowerCamelCase__ ,past_key_values=outputs_cache.past_key_values ,position_ids=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 ,msg=F"""Max diff is {diff}""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = 20 SCREAMING_SNAKE_CASE = model_class_name(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,axis=-1 ,) SCREAMING_SNAKE_CASE = model.init_cache(input_ids.shape[0] ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE = model( input_ids[:, :-1] ,attention_mask=lowerCamelCase__ ,past_key_values=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype="""i4""" ) SCREAMING_SNAKE_CASE = model( input_ids[:, -1:] ,past_key_values=outputs_cache.past_key_values ,attention_mask=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 ,msg=F"""Max diff is {diff}""" ) @require_flax class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __snake_case : List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxGPTJModelTester(self ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) @tooslow def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: '''simple 
docstring''' SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained("""gpt2""" ,pad_token="""<|endoftext|>""" ,padding_side="""left""" ) SCREAMING_SNAKE_CASE = tokenizer(["""Hello this is a long string""", """Hey"""] ,return_tensors="""np""" ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = model.config.eos_token_id SCREAMING_SNAKE_CASE = jax.jit(model.generate ) SCREAMING_SNAKE_CASE = jit_generate( inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE = getattr(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = pt_model_class(lowerCamelCase__ ).eval() SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ,dtype=jnp.floataa ) SCREAMING_SNAKE_CASE = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE = pt_model(**lowerCamelCase__ ).to_tuple() SCREAMING_SNAKE_CASE = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model_class.from_pretrained(lowerCamelCase__ ,from_pt=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = fx_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assert_almost_equals(fx_output_loaded[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE = 
self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE = getattr(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = pt_model_class(lowerCamelCase__ ).eval() SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ,dtype=jnp.floataa ) SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(lowerCamelCase__ ,fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE = pt_model(**lowerCamelCase__ ).to_tuple() SCREAMING_SNAKE_CASE = fx_model(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(lowerCamelCase__ ,from_flax=lowerCamelCase__ ) with torch.no_grad(): SCREAMING_SNAKE_CASE = pt_model_loaded(**lowerCamelCase__ ).to_tuple() self.assertEqual( len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) @tooslow def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ )
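The cache checks above exercise the Flax incremental-decoding interface: init_cache pre-allocates past_key_values for a fixed maximum length, and each call feeds the returned cache plus explicit position_ids back in. Below is a minimal sketch of that loop outside the test harness, assuming a tiny randomly initialised FlaxGPTJForCausalLM; the config sizes are illustrative, not taken from the tests.

import jax.numpy as jnp
import numpy as np
from transformers import GPTJConfig, FlaxGPTJForCausalLM

# Tiny, randomly initialised model purely for illustration (hypothetical sizes).
config = GPTJConfig(vocab_size=64, n_positions=32, n_embd=32, n_layer=2, n_head=2, rotary_dim=8)
model = FlaxGPTJForCausalLM(config)

batch_size, prompt_len, max_len = 1, 4, 8
input_ids = jnp.array(np.random.randint(0, config.vocab_size, (batch_size, prompt_len)), dtype="i4")

# Pre-allocate the cache for the full generation length and build a static attention mask.
past_key_values = model.init_cache(batch_size, max_len)
attention_mask = jnp.ones((batch_size, max_len), dtype="i4")
position_ids = jnp.broadcast_to(jnp.arange(prompt_len)[None, :], (batch_size, prompt_len))

# Prime the cache with the prompt, then decode greedily one token at a time.
outputs = model(input_ids, attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids)
for step in range(prompt_len, max_len):
    next_token = jnp.argmax(outputs.logits[:, -1, :], axis=-1)[:, None]
    position_ids = jnp.full((batch_size, 1), step, dtype="i4")
    outputs = model(next_token, attention_mask=attention_mask, past_key_values=outputs.past_key_values, position_ids=position_ids)

This mirrors what the cache-forward checks verify: the last-step logits from the cached path should match a single full-sequence forward pass.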
296
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
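The import structure above is what keeps the llama subpackage cheap to import: the names are declared up front, but configuration_llama, tokenization_llama and modeling_llama are only loaded by _LazyModule when one of their attributes is first touched. A small sketch of the behaviour this enables (nothing heavier than the config module is actually imported here):

from transformers.models import llama

# Attribute access triggers the lazy import of configuration_llama on first use.
config = llama.LlamaConfig()
print(config.model_type)  # "llama"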
296
1
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE_ = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } SCREAMING_SNAKE_CASE_ = { """Salesforce/codegen-350M-mono""": 2_0_4_8, } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[Any] = VOCAB_FILES_NAMES __snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case : Optional[int] = ["input_ids", "attention_mask"] __snake_case : str = CodeGenTokenizer def __init__( self : Optional[Any] ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : int="<|endoftext|>" ,lowerCamelCase__ : str="<|endoftext|>" ,lowerCamelCase__ : str="<|endoftext|>" ,lowerCamelCase__ : Tuple=False ,**lowerCamelCase__ : List[Any] ,) -> List[str]: '''simple docstring''' super().__init__( lowerCamelCase__ ,lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,add_prefix_space=lowerCamelCase__ ,**lowerCamelCase__ ,) if kwargs.pop("""add_bos_token""" ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = kwargs.pop("""name_or_path""" ,"""""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,lowerCamelCase__ ) != add_prefix_space: SCREAMING_SNAKE_CASE = getattr(lowerCamelCase__ ,pre_tok_state.pop("""type""" ) ) SCREAMING_SNAKE_CASE = add_prefix_space SCREAMING_SNAKE_CASE = pre_tok_class(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = add_prefix_space def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Optional[Any] ) -> BatchEncoding: '''simple docstring''' SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" ,lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Dict ) -> BatchEncoding: '''simple docstring''' SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" ,lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCamelCase__ ,name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[List[str]] = None ,**lowerCamelCase__ : List[Any] ,) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = super().decode( token_ids=lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ ,**lowerCamelCase__ ,) if truncate_before_pattern is not None and len(lowerCamelCase__ ) > 0: SCREAMING_SNAKE_CASE = self.truncate(lowerCamelCase__ ,lowerCamelCase__ ) return decoded_text def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ) -> str: '''simple docstring''' def find_re(lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ): SCREAMING_SNAKE_CASE = pattern.search(lowerCamelCase__ ,lowerCamelCase__ ) return m.start() if m else -1 SCREAMING_SNAKE_CASE = [re.compile(lowerCamelCase__ ,re.MULTILINE ) for pattern in truncate_before_pattern] SCREAMING_SNAKE_CASE = list(re.finditer("""^print""" ,lowerCamelCase__ ,re.MULTILINE ) ) if len(lowerCamelCase__ ) > 1: SCREAMING_SNAKE_CASE = completion[: prints[1].start()] SCREAMING_SNAKE_CASE = list(re.finditer("""^def""" ,lowerCamelCase__ ,re.MULTILINE ) ) if len(lowerCamelCase__ ) > 1: SCREAMING_SNAKE_CASE = completion[: defs[1].start()] SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = [ pos for pos in [find_re(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for terminal in terminals] if pos != -1 ] if len(lowerCamelCase__ ) > 0: return completion[: min(lowerCamelCase__ )] else: return completion
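The decode override above forwards to the usual fast-tokenizer decoding and then, when truncate_before_pattern is given, cuts the completion just before the earliest regex match (with extra handling for repeated top-level print and def statements). A hedged usage sketch; the checkpoint name is the one from the vocabulary maps above and the patterns are illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

text = "def hello():\n    print('hi')\n\n\n# an unrelated trailing snippet\nprint('bye')"
ids = tokenizer(text).input_ids

full = tokenizer.decode(ids)
# The decoded string is cut just before the first match of any supplied pattern,
# here the run of blank lines that precedes the trailing snippet.
truncated = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n\n", r"^#"])
print(truncated)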
296
from pathlib import Path import fire def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) dest_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) for path in src_dir.iterdir(): SCREAMING_SNAKE_CASE = [x.rstrip() for x in list(path.open().readlines() )][:n] SCREAMING_SNAKE_CASE = dest_dir.joinpath(path.name ) print(_SCREAMING_SNAKE_CASE ) dest_path.open("""w""" ).write("""\n""".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": fire.Fire(minify)
296
1
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(_SCREAMING_SNAKE_CASE ): return ext raise Exception( F"""Unable to determine file format from file extension {path}. """ F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) SCREAMING_SNAKE_CASE = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format SCREAMING_SNAKE_CASE = PipelineDataFormat.from_str( format=_SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Any ,lowerCamelCase__ : Pipeline ,lowerCamelCase__ : PipelineDataFormat ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = nlp SCREAMING_SNAKE_CASE = reader @staticmethod def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : ArgumentParser ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = parser.add_parser("""run""" ,help="""Run a pipeline through the CLI""" ) run_parser.add_argument("""--task""" ,choices=get_supported_tasks() ,help="""Task to run""" ) run_parser.add_argument("""--input""" ,type=lowerCamelCase__ ,help="""Path to the file to use for inference""" ) run_parser.add_argument("""--output""" ,type=lowerCamelCase__ ,help="""Path to the file that will be used post to write results.""" ) run_parser.add_argument("""--model""" ,type=lowerCamelCase__ ,help="""Name or path to the model to instantiate.""" ) run_parser.add_argument("""--config""" ,type=lowerCamelCase__ ,help="""Name or path to the model's config to instantiate.""" ) run_parser.add_argument( """--tokenizer""" ,type=lowerCamelCase__ ,help="""Name of the tokenizer to use. (default: same as the model name)""" ) run_parser.add_argument( """--column""" ,type=lowerCamelCase__ ,help="""Name of the column to use as input. 
(For multi columns input as QA use column1,columns2)""" ,) run_parser.add_argument( """--format""" ,type=lowerCamelCase__ ,default="""infer""" ,choices=PipelineDataFormat.SUPPORTED_FORMATS ,help="""Input format to read from""" ,) run_parser.add_argument( """--device""" ,type=lowerCamelCase__ ,default=-1 ,help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" ,) run_parser.add_argument("""--overwrite""" ,action="""store_true""" ,help="""Allow overwriting the output file.""" ) run_parser.set_defaults(func=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self._nlp, [] for entry in self._reader: SCREAMING_SNAKE_CASE = nlp(**lowerCamelCase__ ) if self._reader.is_multi_columns else nlp(lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): outputs.append(lowerCamelCase__ ) else: outputs += output # Saving data if self._nlp.binary_output: SCREAMING_SNAKE_CASE = self._reader.save_binary(lowerCamelCase__ ) logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(lowerCamelCase__ )
296
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
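The registration test above shows the pattern user code follows to plug a custom configuration into the auto classes: AutoConfig.register ties a model_type string to a PretrainedConfig subclass, after which for_model and from_pretrained resolve it like any built-in type. A minimal sketch with a hypothetical model type:

from transformers import AutoConfig, PretrainedConfig

class ToyConfig(PretrainedConfig):
    # "toy-model" is a made-up model type used only for this illustration.
    model_type = "toy-model"

    def __init__(self, hidden_size=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

AutoConfig.register("toy-model", ToyConfig)

config = AutoConfig.for_model("toy-model", hidden_size=32)
assert isinstance(config, ToyConfig) and config.hidden_size == 32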
296
1
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets SCREAMING_SNAKE_CASE_ = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ SCREAMING_SNAKE_CASE_ = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ SCREAMING_SNAKE_CASE_ = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ,id="""sequence""" ), """references""": datasets.Value("""string""" ,id="""sequence""" ), } ) ,codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] ,reference_urls=[ """https://en.wikipedia.org/wiki/ROUGE_(metric)""", """https://github.com/google-research/google-research/tree/master/rouge""", ] ,) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=False ) -> Dict: '''simple docstring''' if rouge_types is None: SCREAMING_SNAKE_CASE = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""] SCREAMING_SNAKE_CASE = rouge_scorer.RougeScorer(rouge_types=lowerCamelCase__ ,use_stemmer=lowerCamelCase__ ) if use_aggregator: SCREAMING_SNAKE_CASE = scoring.BootstrapAggregator() else: SCREAMING_SNAKE_CASE = [] for ref, pred in zip(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = scorer.score(lowerCamelCase__ ,lowerCamelCase__ ) if use_aggregator: aggregator.add_scores(lowerCamelCase__ ) else: scores.append(lowerCamelCase__ ) if use_aggregator: SCREAMING_SNAKE_CASE = aggregator.aggregate() else: SCREAMING_SNAKE_CASE = {} for key in scores[0]: SCREAMING_SNAKE_CASE = [score[key] for score in scores] return result
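compute either funnels the per-pair scores through a BootstrapAggregator (the default, as in the docstring example) or, with use_aggregator=False, returns one Score tuple per prediction. A small hedged sketch of the non-aggregated path, with made-up inputs:

import datasets

rouge = datasets.load_metric("rouge")
predictions = ["the cat sat on the mat", "general kenobi"]
references = ["the cat was sitting on the mat", "general kenobi"]

# Each key now maps to a list of per-example Score tuples (precision, recall, fmeasure)
# rather than an AggregateScore with low/mid/high bounds.
results = rouge.compute(
    predictions=predictions,
    references=references,
    rouge_types=["rouge1", "rougeL"],
    use_aggregator=False,
)
print(results["rouge1"][0].fmeasure)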
296
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git_vision_model" def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git" def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]: '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = num_image_with_embedding SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
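GitConfig nests a GitVisionConfig for the image tower next to the text-side hyperparameters, building the vision part from a plain dict (or from defaults when vision_config is None). A small hedged sketch; the values below simply restate the defaults shown in the classes above:

from transformers import GitConfig

config = GitConfig(
    vocab_size=30522,
    hidden_size=768,
    vision_config={"hidden_size": 768, "image_size": 224, "patch_size": 16},
)

# The dict is wrapped into a GitVisionConfig, and to_dict() serialises it back out.
print(config.vision_config.image_size)                  # 224
print(config.to_dict()["vision_config"]["patch_size"])  # 16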
296
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[int] = "roberta-prelayernorm" def __init__( self : Optional[Any] ,lowerCamelCase__ : Dict=50265 ,lowerCamelCase__ : str=768 ,lowerCamelCase__ : Optional[Any]=12 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : int=3072 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : Any=512 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : Dict=0.02 ,lowerCamelCase__ : Any=1e-1_2 ,lowerCamelCase__ : Optional[int]=1 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Any="absolute" ,lowerCamelCase__ : str=True ,lowerCamelCase__ : int=None ,**lowerCamelCase__ : Optional[Any] ,) -> str: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
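The configuration class above mirrors the stock RoBERTa hyperparameters under the roberta-prelayernorm model type, while the ONNX config picks dynamic axes based on the task. A tiny hedged sketch of instantiating a smaller-than-default configuration (sizes are illustrative):

from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
print(config.model_type)  # "roberta-prelayernorm"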
296
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = spectrogram_length SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = num_audio_channels SCREAMING_SNAKE_CASE = hop_length SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = sampling_rate def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str: '''simple docstring''' def _flatten(lowerCamelCase__ : List[Any] ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[Any] = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) ) 
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE = feature_extractor( lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = TvltFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
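The extraction tests above call the feature extractor on raw waveforms and check the (batch, channels, time, features) layout of audio_values. A hedged sketch of the same call outside the test harness, using the default constructor as the integration test does and random waveforms in place of real speech:

import numpy as np
from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
# Two random waveforms of different lengths stand in for real 44.1 kHz audio.
speech = [np.random.rand(8_000).astype(np.float32), np.random.rand(16_000).astype(np.float32)]

encoded = feature_extractor(speech, sampling_rate=44100, return_tensors="np")
# Shape is (batch, num_audio_channels, time, feature_size), with time capped at spectrogram_length.
print(encoded.audio_values.shape)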
296
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def __lowercase ( _SCREAMING_SNAKE_CASE=None ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = argparse.ArgumentParser(add_help=_SCREAMING_SNAKE_CASE , allow_abbrev=_SCREAMING_SNAKE_CASE ) # The main config parser SCREAMING_SNAKE_CASE = config_command_parser(_SCREAMING_SNAKE_CASE ) # The subparser to add commands to SCREAMING_SNAKE_CASE = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" ) # Then add other parsers with the parent parser default_command_parser(_SCREAMING_SNAKE_CASE , parents=[parent_parser] ) update_command_parser(_SCREAMING_SNAKE_CASE , parents=[parent_parser] ) return config_parser def __lowercase ( ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = get_config_parser() SCREAMING_SNAKE_CASE = config_parser.parse_args() if not hasattr(_SCREAMING_SNAKE_CASE , """func""" ): config_parser.print_help() exit(1 ) # Run args.func(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
296
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 
7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
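Once a checkpoint has been converted (and optionally pushed under the names in model_mapping), it can be used with the regular image-classification classes. A hedged sketch, assuming the apple/mobilevit-small repository from the mapping above is available, and reusing the same COCO test image as prepare_img:

import requests
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# id2label comes from the imagenet-1k label file wired up in get_mobilevit_config.
print(model.config.id2label[logits.argmax(-1).item()])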
296
1
from __future__ import annotations from math import gcd def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 3 , ) -> int | None: '''simple docstring''' if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: return (pow(_SCREAMING_SNAKE_CASE , 2 ) + step) % modulus for _ in range(_SCREAMING_SNAKE_CASE ): # These track the position within the cycle detection logic. SCREAMING_SNAKE_CASE = seed SCREAMING_SNAKE_CASE = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. SCREAMING_SNAKE_CASE = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. SCREAMING_SNAKE_CASE = gcd(hare - tortoise , _SCREAMING_SNAKE_CASE ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. SCREAMING_SNAKE_CASE = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() parser.add_argument( """num""", type=int, help="""The value to find a divisor of""", ) parser.add_argument( """--attempts""", type=int, default=3, help="""The number of attempts before giving up""", ) SCREAMING_SNAKE_CASE_ = parser.parse_args() SCREAMING_SNAKE_CASE_ = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F'''{args.num} is probably prime''') else: SCREAMING_SNAKE_CASE_ = args.num // divisor print(F'''{args.num} = {divisor} * {quotient}''')
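# --- Minimal standalone illustration of the rho idea implemented above ---
# This sketch assumes nothing from the file above (whose public entry point appears to
# be ``pollard_rho``, as called in the __main__ block). It factors 8051 = 83 * 97 with
# the same f(x) = (x**2 + c) % n pseudorandom map and Floyd cycle detection.
from math import gcd

def rho_demo(n, seed=2, c=1):
    f = lambda x: (x * x + c) % n
    tortoise = hare = seed
    while True:
        tortoise = f(tortoise)                 # tortoise moves one step
        hare = f(f(hare))                      # hare moves two steps
        divisor = gcd(abs(hare - tortoise), n)
        if divisor == n:
            return None                        # this seed/step combination failed
        if divisor > 1:
            return divisor                     # nontrivial factor found

print(rho_demo(8051))  # -> 97 (found after three iterations with these defaults)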
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence 
language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
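# --- Minimal standalone sketch of the lazy-import pattern used above ---
# ``_LazyModule`` defers importing the heavy modeling code until a name is first
# accessed. The toy module below reproduces the idea with a PEP 562 module-level
# __getattr__; the submodule/name choices are illustrative only. Save it as, e.g.,
# ``lazy_demo.py`` and run ``from lazy_demo import dumps`` to trigger the deferred import.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    if name in _name_to_module:
        module = importlib.import_module(_name_to_module[name])  # imported on first access
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")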
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "table-transformer" __snake_case : Union[str, Any] = ["past_key_values"] __snake_case : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) # set timm attributes to None SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.d_model class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' return 12
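# --- Usage sketch for the configuration class above ---
# The attribute names follow the public transformers API; the printed defaults come
# from the __init__ signature above (100 object queries, hidden size 256).
from transformers import TableTransformerConfig

config = TableTransformerConfig()           # defaults to the timm resnet50 backbone
print(config.model_type)                    # -> "table-transformer"
print(config.num_queries, config.d_model)   # -> 100 256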
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Tuple = AudioLDMPipeline __snake_case : Tuple = TEXT_TO_AUDIO_PARAMS __snake_case : Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS __snake_case : Union[str, Any] = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=(32, 64) ,class_embed_type="""simple_projection""" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" ,model_max_length=77 ) SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=16000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = SpeechTaHifiGan(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """vocoder""": vocoder, } return components def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[Any]=0 ) -> str: '''simple docstring''' if str(lowerCamelCase__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A hammer hitting a wooden surface""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, } return inputs 
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase__ ) == 256 SCREAMING_SNAKE_CASE = audio[:10] SCREAMING_SNAKE_CASE = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 3 * [inputs["""prompt"""]] # forward SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 3 * [inputs.pop("""prompt""" )] SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer( lowerCamelCase__ ,padding="""max_length""" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=lowerCamelCase__ ,return_tensors="""pt""" ,) SCREAMING_SNAKE_CASE = text_inputs["""input_ids"""].to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder( lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE = F.normalize(lowerCamelCase__ ,dim=-1 ) SCREAMING_SNAKE_CASE = prompt_embeds # forward SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 3 * ["""this is a negative prompt"""] SCREAMING_SNAKE_CASE = negative_prompt SCREAMING_SNAKE_CASE = 3 * [inputs["""prompt"""]] # forward SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 3 * [inputs.pop("""prompt""" )] SCREAMING_SNAKE_CASE = [] for p in [prompt, negative_prompt]: SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer( lowerCamelCase__ ,padding="""max_length""" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=lowerCamelCase__ ,return_tensors="""pt""" ,) SCREAMING_SNAKE_CASE = text_inputs["""input_ids"""].to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder( lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = text_embeds.text_embeds # 
additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE = F.normalize(lowerCamelCase__ ,dim=-1 ) embeds.append(lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = embeds # forward SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """egg cracking""" SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase__ ) == 256 SCREAMING_SNAKE_CASE = audio[:10] SCREAMING_SNAKE_CASE = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """A hammer hitting a wooden surface""" # test num_waveforms_per_prompt=1 (default) SCREAMING_SNAKE_CASE = audioldm_pipe(lowerCamelCase__ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = audioldm_pipe(lowerCamelCase__ ,num_inference_steps=2 ,num_waveforms_per_prompt=lowerCamelCase__ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=lowerCamelCase__ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase__ ) / 
vocoder_sampling_rate == 0.016 SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase__ ) / vocoder_sampling_rate == 0.032 def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = AudioLDMPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ["""hey"""] SCREAMING_SNAKE_CASE = audioldm_pipe(lowerCamelCase__ ,num_inference_steps=1 ) SCREAMING_SNAKE_CASE = output.audios.shape assert audio_shape == (1, 256) SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config config.model_in_dim *= 2 SCREAMING_SNAKE_CASE = SpeechTaHifiGan(lowerCamelCase__ ).to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe(lowerCamelCase__ ,num_inference_steps=1 ) SCREAMING_SNAKE_CASE = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase__ ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ) @slow class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any="cpu" ,lowerCamelCase__ : Optional[int]=torch.floataa ,lowerCamelCase__ : List[str]=0 ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 8, 128, 16) ) SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ,dtype=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A hammer hitting a wooden surface""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 2.5, } return inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 25 SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ).audios[0] assert audio.ndim == 1 assert len(lowerCamelCase__ ) == 81920 SCREAMING_SNAKE_CASE = audio[77230:77240] SCREAMING_SNAKE_CASE = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max() assert max_diff < 
1e-2 def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) SCREAMING_SNAKE_CASE = audioldm_pipe.to(lowerCamelCase__ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = audioldm_pipe(**lowerCamelCase__ ).audios[0] assert audio.ndim == 1 assert len(lowerCamelCase__ ) == 81920 SCREAMING_SNAKE_CASE = audio[27780:27790] SCREAMING_SNAKE_CASE = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count perimeters up to ``limit`` that admit exactly one integer right triangle."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
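# --- Cross-check sketch for the Euclid-parametrisation solution above ---
# For a small ceiling the same count can be obtained by brute force over integer right
# triangles, which is a handy sanity check on the m/n generation logic. O(limit**2),
# so keep the ceiling tiny; the result should coincide with solution(limit) above.
def brute_force(limit):
    from collections import defaultdict
    counts = defaultdict(int)
    for a in range(3, limit):
        for b in range(a, limit):
            c_squared = a * a + b * b
            c = int(c_squared ** 0.5)
            if c * c == c_squared and a + b + c <= limit:
                counts[a + b + c] += 1  # one more right triangle with this perimeter
    return sum(1 for v in counts.values() if v == 1)

print(brute_force(120))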
import argparse import math import traceback import dateutil.parser as date_parser import requests def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = job["""started_at"""] SCREAMING_SNAKE_CASE = job["""completed_at"""] SCREAMING_SNAKE_CASE = date_parser.parse(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = date_parser.parse(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = round((end_datetime - start_datetime).total_seconds() / 60.0 ) SCREAMING_SNAKE_CASE = start SCREAMING_SNAKE_CASE = end SCREAMING_SNAKE_CASE = duration_in_min return job_info def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = None if token is not None: SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""} SCREAMING_SNAKE_CASE = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" SCREAMING_SNAKE_CASE = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() SCREAMING_SNAKE_CASE = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(_SCREAMING_SNAKE_CASE ) for job in result["""jobs"""]} ) SCREAMING_SNAKE_CASE = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json() job_time.update({job["""name"""]: extract_time_from_single_job(_SCREAMING_SNAKE_CASE ) for job in result["""jobs"""]} ) return job_time except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") SCREAMING_SNAKE_CASE_ = parser.parse_args() SCREAMING_SNAKE_CASE_ = get_job_time(args.workflow_run_id) SCREAMING_SNAKE_CASE_ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'''{k}: {v['duration']}''')
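# --- Illustrative call of the per-job duration logic above ---
# The payload below is a made-up stand-in for what the GitHub Actions jobs API returns;
# only the two timestamp fields that the helper reads are included.
import dateutil.parser as date_parser

fake_job = {
    "name": "run_tests_torch",
    "started_at": "2023-05-01T10:00:00Z",
    "completed_at": "2023-05-01T10:41:00Z",
}
start = date_parser.parse(fake_job["started_at"])
end = date_parser.parse(fake_job["completed_at"])
print(round((end - start).total_seconds() / 60.0))  # -> 41 minutes, as computed above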
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main() -> None:
    """Entry point for the ``diffusers-cli`` command line tool."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import operator as op SCREAMING_SNAKE_CASE_ = """scaler.pt""" SCREAMING_SNAKE_CASE_ = """pytorch_model""" SCREAMING_SNAKE_CASE_ = """random_states""" SCREAMING_SNAKE_CASE_ = """optimizer""" SCREAMING_SNAKE_CASE_ = """scheduler""" SCREAMING_SNAKE_CASE_ = """pytorch_model.bin""" SCREAMING_SNAKE_CASE_ = """pytorch_model.bin.index.json""" SCREAMING_SNAKE_CASE_ = """model.safetensors""" SCREAMING_SNAKE_CASE_ = """model.safetensors.index.json""" SCREAMING_SNAKE_CASE_ = """1.10.2""" SCREAMING_SNAKE_CASE_ = """py38""" SCREAMING_SNAKE_CASE_ = """4.17.0""" SCREAMING_SNAKE_CASE_ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""] SCREAMING_SNAKE_CASE_ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""] SCREAMING_SNAKE_CASE_ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""] SCREAMING_SNAKE_CASE_ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""] SCREAMING_SNAKE_CASE_ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""] SCREAMING_SNAKE_CASE_ = """2.0.1""" SCREAMING_SNAKE_CASE_ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""] SCREAMING_SNAKE_CASE_ = ["""default""", """reduce-overhead""", """max-autotune"""] SCREAMING_SNAKE_CASE_ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 SCREAMING_SNAKE_CASE_ = [ """nnodes""", """nproc_per_node""", """rdzv_backend""", """rdzv_endpoint""", """rdzv_id""", """rdzv_conf""", """standalone""", """max_restarts""", """monitor_interval""", """start_method""", """role""", """module""", """m""", """no_python""", """run_path""", """log_dir""", """r""", """redirects""", """t""", """tee""", """node_rank""", """master_addr""", """master_port""", ] SCREAMING_SNAKE_CASE_ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""] SCREAMING_SNAKE_CASE_ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
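# --- Usage sketch for the string-to-operator table above ---
# The mapping is typically used to compare version strings (e.g. "is the installed
# torch at least FSDP_PYTORCH_VERSION?"). The helper below is illustrative and relies
# only on the mapping plus packaging.version.
import operator as op
from packaging import version

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(installed, operation, required):
    return STR_OPERATION_TO_FUNC[operation](version.parse(installed), version.parse(required))

print(compare_versions("2.1.0", ">=", "1.10.2"))  # -> True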
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = SwinConfig() SCREAMING_SNAKE_CASE = swin_name.split("""_""" ) SCREAMING_SNAKE_CASE = name_split[1] SCREAMING_SNAKE_CASE = int(name_split[4] ) SCREAMING_SNAKE_CASE = int(name_split[3][-1] ) if model_size == "tiny": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 6, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "small": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "base": SCREAMING_SNAKE_CASE = 1_28 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (4, 8, 16, 32) else: SCREAMING_SNAKE_CASE = 1_92 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (6, 12, 24, 48) if "in22k" in swin_name: SCREAMING_SNAKE_CASE = 2_18_41 else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = img_size SCREAMING_SNAKE_CASE = num_classes SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = window_size return config def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: SCREAMING_SNAKE_CASE = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "norm.bias": SCREAMING_SNAKE_CASE = """layernorm.bias""" if "head" in name: SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" ) else: SCREAMING_SNAKE_CASE = """swin.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[1] ) SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] 
SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[ :dim ] SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE = val[ -dim: ] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal SCREAMING_SNAKE_CASE_ = datasets.utils.logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = ["""names""", """prefix"""] SCREAMING_SNAKE_CASE_ = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""] SCREAMING_SNAKE_CASE_ = ["""encoding_errors""", """on_bad_lines"""] SCREAMING_SNAKE_CASE_ = ["""date_format"""] @dataclass class UpperCamelCase__ ( datasets.BuilderConfig ): '''simple docstring''' __snake_case : str = "," __snake_case : Optional[str] = None __snake_case : Optional[Union[int, List[int], str]] = "infer" __snake_case : Optional[List[str]] = None __snake_case : Optional[List[str]] = None __snake_case : Optional[Union[int, str, List[int], List[str]]] = None __snake_case : Optional[Union[List[int], List[str]]] = None __snake_case : Optional[str] = None __snake_case : bool = True __snake_case : Optional[Literal["c", "python", "pyarrow"]] = None __snake_case : Dict[Union[int, str], Callable[[Any], Any]] = None __snake_case : Optional[list] = None __snake_case : Optional[list] = None __snake_case : bool = False __snake_case : Optional[Union[int, List[int]]] = None __snake_case : Optional[int] = None __snake_case : Optional[Union[str, List[str]]] = None __snake_case : bool = True __snake_case : bool = True __snake_case : bool = False __snake_case : bool = True __snake_case : Optional[str] = None __snake_case : str = "." __snake_case : Optional[str] = None __snake_case : str = '"' __snake_case : int = 0 __snake_case : Optional[str] = None __snake_case : Optional[str] = None __snake_case : Optional[str] = None __snake_case : Optional[str] = None __snake_case : bool = True __snake_case : bool = True __snake_case : int = 0 __snake_case : bool = True __snake_case : bool = False __snake_case : Optional[str] = None __snake_case : int = 10000 __snake_case : Optional[datasets.Features] = None __snake_case : Optional[str] = "strict" __snake_case : Literal["error", "warn", "skip"] = "error" __snake_case : Optional[str] = None def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: '''simple docstring''' if self.delimiter is not None: SCREAMING_SNAKE_CASE = self.delimiter if self.column_names is not None: SCREAMING_SNAKE_CASE = self.column_names @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = { """sep""": self.sep, """header""": self.header, """names""": self.names, """index_col""": self.index_col, """usecols""": self.usecols, """prefix""": self.prefix, """mangle_dupe_cols""": self.mangle_dupe_cols, """engine""": self.engine, """converters""": self.converters, """true_values""": self.true_values, """false_values""": self.false_values, """skipinitialspace""": self.skipinitialspace, """skiprows""": self.skiprows, """nrows""": self.nrows, """na_values""": self.na_values, """keep_default_na""": self.keep_default_na, """na_filter""": self.na_filter, """verbose""": self.verbose, """skip_blank_lines""": self.skip_blank_lines, """thousands""": self.thousands, """decimal""": self.decimal, """lineterminator""": self.lineterminator, """quotechar""": self.quotechar, """quoting""": self.quoting, """escapechar""": self.escapechar, """comment""": self.comment, """encoding""": self.encoding, 
"""dialect""": self.dialect, """error_bad_lines""": self.error_bad_lines, """warn_bad_lines""": self.warn_bad_lines, """skipfooter""": self.skipfooter, """doublequote""": self.doublequote, """memory_map""": self.memory_map, """float_precision""": self.float_precision, """chunksize""": self.chunksize, """encoding_errors""": self.encoding_errors, """on_bad_lines""": self.on_bad_lines, """date_format""": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,lowerCamelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class UpperCamelCase__ ( datasets.ArrowBasedBuilder ): '''simple docstring''' __snake_case : Union[str, Any] = CsvConfig def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCamelCase__ ,(str, list, tuple) ): SCREAMING_SNAKE_CASE = data_files if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = [files] SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCamelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )] SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = [files] SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCamelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCamelCase__ ,gen_kwargs={"""files""": files} ) ) return splits def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: SCREAMING_SNAKE_CASE = self.config.features.arrow_schema if all(not require_storage_cast(lowerCamelCase__ ) for feature in self.config.features.values() ): # cheaper cast SCREAMING_SNAKE_CASE = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=lowerCamelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE = table_cast(lowerCamelCase__ ,lowerCamelCase__ ) return pa_table def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : List[Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str SCREAMING_SNAKE_CASE = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCamelCase__ ) else object for name, 
dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase__ ) ): SCREAMING_SNAKE_CASE = pd.read_csv(lowerCamelCase__ ,iterator=lowerCamelCase__ ,dtype=lowerCamelCase__ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = pa.Table.from_pandas(lowerCamelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCamelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCamelCase__ )}: {e}""" ) raise
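# --- Usage sketch for the CSV builder above ---
# End users normally reach this builder through ``datasets.load_dataset``; extra keyword
# arguments are stored on CsvConfig and forwarded into pandas.read_csv. The file path
# below is a placeholder.
from datasets import load_dataset

dataset = load_dataset(
    "csv",
    data_files={"train": "my_data.csv"},  # placeholder path
    sep=",",
    skiprows=1,                           # forwarded to pandas via CsvConfig
)
print(dataset["train"].features)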
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among ``env_keys``, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
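# --- Usage sketch for the flag parsing above ---
# ``strtobool`` is what makes "yes"/"true"/"1" (and their negatives) valid flag values;
# the environment variable names below are made up, and the calls assume the helpers
# above are in scope under these names.
import os

os.environ["MY_DEBUG_FLAG"] = "yes"
print(parse_flag_from_env("MY_DEBUG_FLAG"))  # -> True  ("yes" -> strtobool -> 1)
print(parse_flag_from_env("MISSING_FLAG"))   # -> False (falls back to the default)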
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours when ``taken`` balls are drawn (Project Euler 493)."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
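# --- Worked check of the expectation computed above ---
# With 10 balls of each of 7 colours and 20 balls drawn, linearity of expectation gives
#   E[distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)),
# i.e. 7 times the probability that one fixed colour shows up at least once.
import math

total = math.comb(70, 20)
missing_one_colour = math.comb(60, 20)
print(f"{7 * (1 - missing_one_colour / total):.9f}")  # -> 6.818741802, matching solution() above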
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __snake_case : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __snake_case : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Use FP16 to accelerate inference."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark training of model"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Verbose memory tracing"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Trace memory line by line"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save result to a CSV file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save all print statements in a log file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to print environment information"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __snake_case : str = field( default=F"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) __snake_case : str = field( default=F"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __snake_case : str = field( default=F"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __snake_case : str = field( default=F"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __snake_case : str = field( default=F"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) __snake_case : str = field( default=F"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) __snake_case : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" ,lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import math
import unittest


def is_prime(number: int) -> bool:
    """Trial division restricted to divisors of the form 6k - 1 and 6k + 1."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
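# Sketch of why the loop above can skip most candidates (not part of the original file):
# every prime greater than 3 is congruent to 1 or 5 modulo 6, so trial division only needs
# divisors of the form 6k - 1 and 6k + 1. A hypothetical cross-check against a naive test:
def naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d != 0 for d in range(2, n))


assert all(is_prime(n) == naive_is_prime(n) for n in range(2, 200))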
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Optional[Any] = BioGptTokenizer __snake_case : List[str] = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) ) SCREAMING_SNAKE_CASE = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) ) with open(self.merges_file ,"""w""" ) as fp: fp.write("""\n""".join(lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Tuple ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = """lower newer""" SCREAMING_SNAKE_CASE = """lower newer""" return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = BioGptTokenizer(self.vocab_file ,self.merges_file ) SCREAMING_SNAKE_CASE = """lower""" SCREAMING_SNAKE_CASE = ["""low""", """er</w>"""] SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokens + ["""<unk>"""] SCREAMING_SNAKE_CASE = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" ,add_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` into a list of numbers together with the random key used."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert `encrypt`: each plaintext code point is (cipher - key**2) / key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def __lowercase ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = ArgumentParser( """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""datasets-cli command helpers""" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) TestCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) RunBeamCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) DummyDataCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) # Parse args SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = parser.parse_known_args() if not hasattr(_SCREAMING_SNAKE_CASE , """func""" ): parser.print_help() exit(1 ) SCREAMING_SNAKE_CASE = parse_unknown_args(_SCREAMING_SNAKE_CASE ) # Run SCREAMING_SNAKE_CASE = args.func(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) service.run() if __name__ == "__main__": main()
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = "efficientformer" def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = mlp_expansion_ratio SCREAMING_SNAKE_CASE = downsamples SCREAMING_SNAKE_CASE = dim SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = resolution SCREAMING_SNAKE_CASE = pool_size SCREAMING_SNAKE_CASE = downsample_patch_size SCREAMING_SNAKE_CASE = downsample_stride SCREAMING_SNAKE_CASE = downsample_pad SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = num_metaad_blocks SCREAMING_SNAKE_CASE = distillation SCREAMING_SNAKE_CASE = use_layer_scale SCREAMING_SNAKE_CASE = layer_scale_init_value SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = batch_norm_eps
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[int] = "gpt_neox" def __init__( self : Any ,lowerCamelCase__ : Optional[int]=50432 ,lowerCamelCase__ : List[Any]=6144 ,lowerCamelCase__ : Dict=44 ,lowerCamelCase__ : Dict=64 ,lowerCamelCase__ : Tuple=24576 ,lowerCamelCase__ : List[str]="gelu" ,lowerCamelCase__ : int=0.25 ,lowerCamelCase__ : Any=10000 ,lowerCamelCase__ : Dict=0.0 ,lowerCamelCase__ : Any=0.0 ,lowerCamelCase__ : str=0.1 ,lowerCamelCase__ : int=2048 ,lowerCamelCase__ : Union[str, Any]=0.02 ,lowerCamelCase__ : List[str]=1e-5 ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : List[str]=0 ,lowerCamelCase__ : List[Any]=2 ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[Any]=None ,**lowerCamelCase__ : List[Any] ,) -> str: '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = rotary_pct SCREAMING_SNAKE_CASE = rotary_emb_base SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = hidden_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = use_parallel_residual SCREAMING_SNAKE_CASE = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,lowerCamelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ F"""got {self.rope_scaling}""" ) SCREAMING_SNAKE_CASE = self.rope_scaling.get("""type""" ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.rope_scaling.get("""factor""" ,lowerCamelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
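# A minimal sketch (not from the original file) of a `rope_scaling` value that satisfies the
# validation above: a two-field dict whose `type` is "linear" or "dynamic" and whose `factor`
# is a float strictly greater than 1.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})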
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' if "://" in dataset_path: SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1] return dataset_path def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool: '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) ) else: fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE ) def __lowercase ( ) -> None: '''simple docstring''' if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = threading.Lock()
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.dummy_uncond_unet SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0] SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256""" SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def solution(limit: int = 1000000) -> int:
    """Count values n below `limit` with exactly ten solutions to x**2 - y**2 - z**2 = n."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]: '''simple docstring''' super().__init__( features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = Generator( cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,) SCREAMING_SNAKE_CASE = self.builder.as_dataset( split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory ) return dataset
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence 
language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) __snake_case : Optional[str] = field( default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[str] = TextToVideoSDPipeline __snake_case : int = TEXT_TO_IMAGE_PARAMS __snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __snake_case : Dict = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,) SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]: '''simple docstring''' if str(lowerCamelCase__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) 
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """np""" SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
import os def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = len(grid[0] ) SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(n_rows - 3 ): SCREAMING_SNAKE_CASE = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] SCREAMING_SNAKE_CASE = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: SCREAMING_SNAKE_CASE = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: SCREAMING_SNAKE_CASE = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) SCREAMING_SNAKE_CASE = max( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if max_product > largest: SCREAMING_SNAKE_CASE = max_product return largest def __lowercase ( ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = [] with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + """/grid.txt""" ) as file: for line in file: grid.append(line.strip("""\n""" ).split(""" """ ) ) SCREAMING_SNAKE_CASE = [[int(_SCREAMING_SNAKE_CASE ) for i in grid[j]] for j in range(len(_SCREAMING_SNAKE_CASE ) )] return largest_product(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(solution())
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
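# Sketch (not from the original file): `pow(2, 7830457, modulus)` uses Python's built-in
# modular exponentiation, so the full power (over two million digits) is never materialised.
# A small spot check with modulus 10: powers of 2 end in 2, 4, 8, 6 with period 4, and
# 7830457 % 4 == 1, so the last digit is 2.
assert pow(2, 7830457, 10) == 2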
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
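# Sketch (not from the original file): with the `_LazyModule` registration above, the heavy
# torch / sentencepiece backed classes are only imported on first attribute access, so a
# plain config import stays lightweight, e.g.:
from transformers import LlamaConfig

config = LlamaConfig()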
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : torch.FloatTensor class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : Tuple ,lowerCamelCase__ : int = 65536 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 0 ,lowerCamelCase__ : str = "fourier" ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,lowerCamelCase__ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,lowerCamelCase__ : Tuple[str] = "UNetMidBlock1D" ,lowerCamelCase__ : str = None ,lowerCamelCase__ : Tuple[int] = (32, 32, 64) ,lowerCamelCase__ : str = None ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = False ,) -> List[Any]: '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE = sample_size # time if time_embedding_type == "fourier": SCREAMING_SNAKE_CASE = GaussianFourierProjection( embedding_size=8 ,set_W_to_weight=lowerCamelCase__ ,log=lowerCamelCase__ ,flip_sin_to_cos=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 2 * block_out_channels[0] elif time_embedding_type == "positional": SCREAMING_SNAKE_CASE = Timesteps( block_out_channels[0] ,flip_sin_to_cos=lowerCamelCase__ ,downscale_freq_shift=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = block_out_channels[0] if use_timestep_embedding: SCREAMING_SNAKE_CASE = block_out_channels[0] * 4 SCREAMING_SNAKE_CASE = TimestepEmbedding( in_channels=lowerCamelCase__ ,time_embed_dim=lowerCamelCase__ ,act_fn=lowerCamelCase__ ,out_dim=block_out_channels[0] ,) SCREAMING_SNAKE_CASE = nn.ModuleList([] ) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = nn.ModuleList([] ) SCREAMING_SNAKE_CASE = None # down SCREAMING_SNAKE_CASE = in_channels for i, down_block_type in enumerate(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = output_channel SCREAMING_SNAKE_CASE = block_out_channels[i] if i == 0: input_channel += extra_in_channels SCREAMING_SNAKE_CASE = i == len(lowerCamelCase__ ) - 1 SCREAMING_SNAKE_CASE = get_down_block( lowerCamelCase__ ,num_layers=lowerCamelCase__ ,in_channels=lowerCamelCase__ ,out_channels=lowerCamelCase__ ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,) self.down_blocks.append(lowerCamelCase__ ) # mid SCREAMING_SNAKE_CASE = get_mid_block( lowerCamelCase__ ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=lowerCamelCase__ ,add_downsample=lowerCamelCase__ ,) # up SCREAMING_SNAKE_CASE = list(reversed(lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = reversed_block_out_channels[0] if out_block_type is None: SCREAMING_SNAKE_CASE = out_channels else: SCREAMING_SNAKE_CASE = block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = output_channel SCREAMING_SNAKE_CASE = ( reversed_block_out_channels[i + 1] if i < 
len(lowerCamelCase__ ) - 1 else final_upsample_channels ) SCREAMING_SNAKE_CASE = i == len(lowerCamelCase__ ) - 1 SCREAMING_SNAKE_CASE = get_up_block( lowerCamelCase__ ,num_layers=lowerCamelCase__ ,in_channels=lowerCamelCase__ ,out_channels=lowerCamelCase__ ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,) self.up_blocks.append(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output_channel # out SCREAMING_SNAKE_CASE = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 ) SCREAMING_SNAKE_CASE = get_out_block( out_block_type=lowerCamelCase__ ,num_groups_out=lowerCamelCase__ ,embed_dim=block_out_channels[0] ,out_channels=lowerCamelCase__ ,act_fn=lowerCamelCase__ ,fc_dim=block_out_channels[-1] // 4 ,) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Union[torch.Tensor, float, int] ,lowerCamelCase__ : bool = True ,) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' SCREAMING_SNAKE_CASE = timestep if not torch.is_tensor(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device ) elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE = timesteps[None].to(sample.device ) SCREAMING_SNAKE_CASE = self.time_proj(lowerCamelCase__ ) if self.config.use_timestep_embedding: SCREAMING_SNAKE_CASE = self.time_mlp(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = timestep_embed[..., None] SCREAMING_SNAKE_CASE = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) SCREAMING_SNAKE_CASE = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down SCREAMING_SNAKE_CASE = () for downsample_block in self.down_blocks: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = downsample_block(hidden_states=lowerCamelCase__ ,temb=lowerCamelCase__ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: SCREAMING_SNAKE_CASE = self.mid_block(lowerCamelCase__ ,lowerCamelCase__ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): SCREAMING_SNAKE_CASE = down_block_res_samples[-1:] SCREAMING_SNAKE_CASE = down_block_res_samples[:-1] SCREAMING_SNAKE_CASE = upsample_block(lowerCamelCase__ ,res_hidden_states_tuple=lowerCamelCase__ ,temb=lowerCamelCase__ ) # 5. post-process if self.out_block: SCREAMING_SNAKE_CASE = self.out_block(lowerCamelCase__ ,lowerCamelCase__ ) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCamelCase__ )
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
import os SCREAMING_SNAKE_CASE_ = {"""I""": 1, """V""": 5, """X""": 1_0, """L""": 5_0, """C""": 1_0_0, """D""": 5_0_0, """M""": 1_0_0_0} def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 while index < len(_SCREAMING_SNAKE_CASE ) - 1: SCREAMING_SNAKE_CASE = SYMBOLS[numerals[index]] SCREAMING_SNAKE_CASE = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """""" SCREAMING_SNAKE_CASE = num // 10_00 numerals += m_count * "M" num %= 10_00 SCREAMING_SNAKE_CASE = num // 1_00 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 1_00 SCREAMING_SNAKE_CASE = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def __lowercase ( _SCREAMING_SNAKE_CASE = "/p089_roman.txt" ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + roman_numerals_filename ) as filea: SCREAMING_SNAKE_CASE = filea.readlines() for line in lines: SCREAMING_SNAKE_CASE = line.strip() SCREAMING_SNAKE_CASE = parse_roman_numerals(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = generate_roman_numerals(_SCREAMING_SNAKE_CASE ) savings += len(_SCREAMING_SNAKE_CASE ) - len(_SCREAMING_SNAKE_CASE ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
296
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
296
1
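To make the subtractive-pair logic of the roman-numeral sample above easier to follow, here is a short readable sketch (the test value is mine; the function name matches the one the solution calls).

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman_numerals(numerals: str) -> int:
    # Subtract a symbol when it precedes a larger one (the C in CM), otherwise add it;
    # this is the same comparison the index-based loop above performs.
    total = 0
    for i, ch in enumerate(numerals):
        value = SYMBOLS[ch]
        if i + 1 < len(numerals) and value < SYMBOLS[numerals[i + 1]]:
            total -= value
        else:
            total += value
    return total

assert parse_roman_numerals("MCMXC") == 1990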
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
296
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git_vision_model" def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git" def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]: '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = num_image_with_embedding SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
296
1
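The GIT configuration above falls back to a default vision sub-config when none is given and re-serialises it in to_dict(); the stripped-down sketch below illustrates only that pattern (the class and attribute names here are invented for the example).

import copy

class VisionConfig:
    def __init__(self, image_size=224):
        self.image_size = image_size

    def to_dict(self):
        return dict(self.__dict__)

class CompositeConfig:
    def __init__(self, vision_config=None):
        # Default the nested config when the caller does not supply one.
        self.vision_config = VisionConfig(**(vision_config or {}))

    def to_dict(self):
        # Serialise the nested object explicitly so the result is a plain dict.
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        return output

print(CompositeConfig().to_dict())  # {'vision_config': {'image_size': 224}}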
from math import ceil, sqrt def __lowercase ( _SCREAMING_SNAKE_CASE = 1_00_00_00 ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: SCREAMING_SNAKE_CASE = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: SCREAMING_SNAKE_CASE = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F'''{solution() = }''')
296
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = spectrogram_length SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = num_audio_channels SCREAMING_SNAKE_CASE = hop_length SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = sampling_rate def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str: '''simple docstring''' def _flatten(lowerCamelCase__ : List[Any] ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[Any] = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) ) 
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE = feature_extractor( lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = TvltFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
296
1
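As a worked check of the hollow-lamina count above (my own example values): for a fixed outer width the solution counts every hole width of matching parity whose tile count outer**2 - hole**2 stays within the limit.

from math import ceil, sqrt

def laminae_for_outer(outer: int, limit: int) -> int:
    # Smallest hole width that keeps outer**2 - hole**2 <= limit ...
    hole = max(ceil(sqrt(outer * outer - limit)), 1) if outer * outer > limit else 1
    # ... bumped up by one if its parity differs from the outer width's.
    if (outer - hole) % 2:
        hole += 1
    # Valid hole widths are hole, hole + 2, ..., outer - 2.
    return (outer - hole - 2) // 2 + 1

print(laminae_for_outer(7, 100))  # 3 laminae: holes of width 1, 3 and 5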
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
296
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 
7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
296
1
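The MobileViT conversion above renames checkpoint keys through an ordered chain of substring substitutions; the sketch below shows that idea on a made-up key using only a handful of the rules (it is not the full mapping).

def rename_key(name: str) -> str:
    # A few of the substitutions from the script above, applied in the same order.
    name = name.replace("conv_1.", "conv_stem.")
    name = name.replace(".block.", ".")
    name = name.replace(".norm.", ".normalization.")
    name = name.replace(".conv.", ".convolution.")
    return name

print(rename_key("conv_1.block.conv.weight"))  # conv_stem.convolution.weight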
def __lowercase ( ) -> str: '''simple docstring''' for n in range(1 , 1_00_00_00 ): yield n * (n + 1) // 2 def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 2 while i * i <= n: SCREAMING_SNAKE_CASE = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def __lowercase ( ) -> Any: '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(_SCREAMING_SNAKE_CASE ) > 5_00 ) if __name__ == "__main__": print(solution())
296
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence 
language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
296
1
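A readable sketch of the divisor-counting routine above (the test value is mine; the function name matches the one the solution calls): the divisor count is the product of (multiplicity + 1) over the prime factorisation.

def count_divisors(n: int) -> int:
    count, p = 1, 2
    while p * p <= n:
        multiplicity = 0
        while n % p == 0:      # strip out every factor of p
            n //= p
            multiplicity += 1
        count *= multiplicity + 1
        p += 1
    if n > 1:                  # a single prime factor larger than sqrt(n) remains
        count *= 2
    return count

assert count_divisors(28) == 6  # divisors 1, 2, 4, 7, 14, 28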
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(F"""Building PyTorch model from configuration: {config}""" ) SCREAMING_SNAKE_CASE = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
296
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "table-transformer" __snake_case : Union[str, Any] = ["past_key_values"] __snake_case : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) # set timm attributes to None SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.d_model class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' return 12
296
1
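A hypothetical invocation of the T5 conversion script above; the script file name and every path below are placeholders, not values taken from the source.

# python convert_t5_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/t5/model.ckpt \
#     --config_file /tmp/t5/config.json \
#     --pytorch_dump_path /tmp/t5-pytorch
# Equivalently, the function defined above can be called directly from Python:
# convert_tf_checkpoint_to_pytorch("/tmp/t5/model.ckpt", "/tmp/t5/config.json", "/tmp/t5-pytorch")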
def __lowercase ( ) -> int: '''simple docstring''' return [ a * b * (10_00 - a - b) for a in range(1 , 9_99 ) for b in range(_SCREAMING_SNAKE_CASE , 9_99 ) if (a * a + b * b == (10_00 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'''{solution() = }''')
296
from collections import defaultdict from math import gcd def __lowercase ( _SCREAMING_SNAKE_CASE = 1_50_00_00 ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = defaultdict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _SCREAMING_SNAKE_CASE , 2 ): if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) > 1: continue SCREAMING_SNAKE_CASE = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_SCREAMING_SNAKE_CASE , limit + 1 , _SCREAMING_SNAKE_CASE ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'''{solution() = }''')
296
1
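A quick numeric check (computed here, not quoted from the source) of the triplet the brute-force comprehension above searches for: a + b + c = 1000 with a**2 + b**2 = c**2.

a, b, c = 200, 375, 425
assert a + b + c == 1000 and a * a + b * b == c * c
print(a * b * c)  # 31875000, the product the solution returns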
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = 8 # DPR tok SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok SCREAMING_SNAKE_CASE = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) ) SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> DPRQuestionEncoderTokenizer: '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> DPRContextEncoderTokenizer: '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> BartTokenizer: '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname 
,"""bart_tokenizer""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_dataset() SCREAMING_SNAKE_CASE = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,) with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: SCREAMING_SNAKE_CASE = dataset SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,) return retriever def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : bool ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_dataset() SCREAMING_SNAKE_CASE = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""custom""" ,) if from_disk: SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""dataset""" ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""index.faiss""" ) dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname ,"""index.faiss""" ) ) dataset.drop_index("""embeddings""" ) dataset.save_to_disk(os.path.join(self.tmpdirname ,"""dataset""" ) ) del dataset SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,) else: SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,lowerCamelCase__ ) ,) return retriever def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""hf_bert_base.hnswSQ8_correct_phi_128.c_index""" ) dataset.save_faiss_index("""embeddings""" ,index_file_name + """.index.dpr""" ) pickle.dump(dataset["""id"""] ,open(index_file_name + """.index_meta.dpr""" ,"""wb""" ) ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""psgs_w100.tsv.pkl""" ) SCREAMING_SNAKE_CASE = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset} pickle.dump(lowerCamelCase__ ,open(lowerCamelCase__ ,"""wb""" ) ) SCREAMING_SNAKE_CASE = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""legacy""" ,index_path=self.tmpdirname ,) SCREAMING_SNAKE_CASE = RagRetriever( 
lowerCamelCase__ ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ) return retriever def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=lowerCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase__ ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCamelCase__ ) self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: SCREAMING_SNAKE_CASE = self.get_dummy_dataset() retriever.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=lowerCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase__ ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCamelCase__ ) self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=1 ) self.assertTrue(out is not None ) def 
SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=lowerCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase__ ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCamelCase__ ) self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = self.get_dummy_legacy_index_retriever() SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=lowerCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase__ ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""text"""] ) ,lowerCamelCase__ ) self.assertEqual(doc_dicts[0]["""text"""][0] ,"""bar""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""text"""][0] ,"""foo""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase__ ,n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' import torch SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE = np.array( 
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE = retriever(lowerCamelCase__ ,lowerCamelCase__ ,prefix=retriever.config.generator.prefix ,n_docs=lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = ( out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,np.ndarray ) SCREAMING_SNAKE_CASE = retriever( lowerCamelCase__ ,lowerCamelCase__ ,prefix=retriever.config.generator.prefix ,n_docs=lowerCamelCase__ ,return_tensors="""pt""" ,) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = ( # noqa: F841 out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], out["""doc_ids"""], ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCamelCase__ ,torch.Tensor ) self.assertIsInstance(lowerCamelCase__ ,torch.Tensor ) self.assertIsInstance(lowerCamelCase__ ,torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dpr_ctx_encoder_tokenizer() SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ ) retriever.set_ctx_encoder_tokenizer(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) SCREAMING_SNAKE_CASE = retriever(lowerCamelCase__ ,lowerCamelCase__ ,prefix=retriever.config.generator.prefix ,n_docs=lowerCamelCase__ ) self.assertEqual( len(lowerCamelCase__ ) ,6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) ,lowerCamelCase__ ) # check for doc token related keys in dictionary.
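# --- Illustrative sketch (assumption): the max-inner-product behaviour the tests above rely on ---
# The dummy dataset indexes two documents whose embeddings are all-ones and all-twos vectors,
# and the queries are an all-ones and an all-minus-ones vector, so the expected neighbours are
# doc 1 and doc 0 respectively. The same result can be reproduced with a flat faiss index
# (requires faiss to be installed):
import faiss
import numpy as np

retrieval_vector_size = 8
doc_embeds = np.stack(
    [np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)]
).astype("float32")
index = faiss.IndexFlatIP(retrieval_vector_size)  # inner-product metric, like METRIC_INNER_PRODUCT above
index.add(doc_embeds)

queries = np.stack(
    [np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)]
).astype("float32")
_scores, doc_ids = index.search(queries, 1)
assert doc_ids.tolist() == [[1], [0]]  # doc 1 maximises the first query, doc 0 the second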
296
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
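# --- Illustrative note (assumption; the module path below is hypothetical) ---
# An entry point like the one above is typically wired up through a console-script
# declaration in the package metadata, e.g.:
#
#     entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]}
#
# so that after installation the flow can be exercised from a shell:
#
#     $ diffusers-cli env
#
# argparse resolves the registered subcommand, `args.func(args)` builds the command object
# (here an EnvironmentCommand), and `service.run()` executes it.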
296
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE_ = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } SCREAMING_SNAKE_CASE_ = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 4 class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Any = VOCAB_FILES_NAMES __snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case : int = "left" def __init__( self : Optional[int] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Any=False ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : Any="<unk>" ,lowerCamelCase__ : str="<sep>" ,lowerCamelCase__ : Tuple="<pad>" ,lowerCamelCase__ : Optional[int]="<cls>" ,lowerCamelCase__ : Any="<mask>" ,lowerCamelCase__ : Union[str, Any]=["<eop>", "<eod>"] ,lowerCamelCase__ : Optional[Dict[str, Any]] = None ,**lowerCamelCase__ : Union[str, Any] ,) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCamelCase__ ,remove_space=lowerCamelCase__ ,keep_accents=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = remove_space SCREAMING_SNAKE_CASE = keep_accents SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: '''simple docstring''' return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : List[str] ,lowerCamelCase__ : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] 
,lowerCamelCase__ : int ) -> Any: '''simple docstring''' if self.remove_space: SCREAMING_SNAKE_CASE = """ """.join(inputs.strip().split() ) else: SCREAMING_SNAKE_CASE = inputs SCREAMING_SNAKE_CASE = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" ) if not self.keep_accents: SCREAMING_SNAKE_CASE = unicodedata.normalize("""NFKD""" ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """""".join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] ) if self.do_lower_case: SCREAMING_SNAKE_CASE = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : str ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.preprocess_text(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.sp_model.encode(lowerCamelCase__ ,out_type=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [] for piece in pieces: if len(lowerCamelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ ,"""""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: SCREAMING_SNAKE_CASE = cur_pieces[1:] else: SCREAMING_SNAKE_CASE = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowerCamelCase__ ) else: new_pieces.append(lowerCamelCase__ ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Any ) -> Union[str, Any]: '''simple docstring''' return self.sp_model.PieceToId(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' return self.sp_model.IdToPiece(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : str ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase__ ).replace(lowerCamelCase__ ,""" """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = True ,**lowerCamelCase__ : List[Any] ,) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = kwargs.pop("""use_source_tokenizer""" ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = [] sub_texts.append(lowerCamelCase__ ) else: current_sub_text.append(lowerCamelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCamelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE = self.clean_up_tokenization(lowerCamelCase__ ) return clean_text else: return text def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is not None: return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] return ([0] * len(lowerCamelCase__ )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCamelCase__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase__ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ ,"""wb""" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,)
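# --- Illustrative check (assumption): XLNet-style special-token layout ---
# Unlike BERT, the methods above append `<sep>` and a trailing `<cls>` instead of prepending
# them, and the trailing `<cls>` receives segment id 2. A tiny self-contained sketch
# (the token ids below are made up for illustration):
tokens_a = [10, 11, 12]
tokens_b = [20, 21]
sep, cls = [5], [3]

pair_ids = tokens_a + sep + tokens_b + sep + cls
segment_ids = len(tokens_a + sep) * [0] + len(tokens_b + sep) * [1] + [2]
assert pair_ids == [10, 11, 12, 5, 20, 21, 5, 3]
assert segment_ids == [0, 0, 0, 0, 1, 1, 1, 2]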
296
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = SwinConfig() SCREAMING_SNAKE_CASE = swin_name.split("""_""" ) SCREAMING_SNAKE_CASE = name_split[1] SCREAMING_SNAKE_CASE = int(name_split[4] ) SCREAMING_SNAKE_CASE = int(name_split[3][-1] ) if model_size == "tiny": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 6, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "small": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "base": SCREAMING_SNAKE_CASE = 1_28 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (4, 8, 16, 32) else: SCREAMING_SNAKE_CASE = 1_92 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (6, 12, 24, 48) if "in22k" in swin_name: SCREAMING_SNAKE_CASE = 2_18_41 else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = img_size SCREAMING_SNAKE_CASE = num_classes SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = window_size return config def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: SCREAMING_SNAKE_CASE = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "norm.bias": SCREAMING_SNAKE_CASE = """layernorm.bias""" if "head" in name: SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" ) else: SCREAMING_SNAKE_CASE = """swin.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[1] ) SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] 
SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[ :dim ] SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE = val[ -dim: ] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
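# --- Illustrative sketch (assumption): splitting timm's fused qkv projection ---
# The conversion above assumes timm stores query/key/value as a single (3*dim, dim) weight
# and a (3*dim,) bias per attention block, then slices rows [0:dim], [dim:2*dim], [2*dim:]
# into the separate HF projections. A minimal self-contained version of that slice:
import torch


def split_qkv(qkv_weight: torch.Tensor, qkv_bias: torch.Tensor, dim: int):
    """Return ((q_w, q_b), (k_w, k_b), (v_w, v_b)) from fused qkv parameters."""
    q_w, k_w, v_w = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
    q_b, k_b, v_b = qkv_bias[:dim], qkv_bias[dim : dim * 2], qkv_bias[-dim:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)


# Toy example with dim == 4: shapes come out as (4, 4) weights and (4,) biases.
(q, k, v) = split_qkv(torch.randn(12, 4), torch.randn(12), dim=4)
assert q[0].shape == (4, 4) and q[1].shape == (4,)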
296
1
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase__ ,"""embed_dim""" ) ) self.parent.assertTrue(hasattr(lowerCamelCase__ ,"""num_heads""" ) ) class UpperCamelCase__ : '''simple docstring''' def __init__( self : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Tuple=13 ,lowerCamelCase__ : Dict=64 ,lowerCamelCase__ : Union[str, Any]=3 ,lowerCamelCase__ : str=[16, 48, 96] ,lowerCamelCase__ : int=[1, 3, 6] ,lowerCamelCase__ : int=[1, 2, 10] ,lowerCamelCase__ : Tuple=[7, 3, 3] ,lowerCamelCase__ : Dict=[4, 2, 2] ,lowerCamelCase__ : str=[2, 1, 1] ,lowerCamelCase__ : Any=[2, 2, 2] ,lowerCamelCase__ : Optional[Any]=[False, False, True] ,lowerCamelCase__ : int=[0.0, 0.0, 0.0] ,lowerCamelCase__ : int=0.02 ,lowerCamelCase__ : Union[str, Any]=1e-1_2 ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=2 ,) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_sizes SCREAMING_SNAKE_CASE = patch_stride SCREAMING_SNAKE_CASE = patch_padding SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = stride_kv SCREAMING_SNAKE_CASE = depth SCREAMING_SNAKE_CASE = cls_token SCREAMING_SNAKE_CASE = attention_drop_rate SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: # create a random int32 tensor of given shape SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: '''simple docstring''' return CvtConfig( image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ) 
-> str: '''simple docstring''' SCREAMING_SNAKE_CASE = TFCvtModel(config=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,training=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for i in range(len(self.depth ) ): SCREAMING_SNAKE_CASE = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) SCREAMING_SNAKE_CASE = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = TFCvtForImageClassification(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,labels=lowerCamelCase__ ,training=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[str] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () __snake_case : Optional[int] = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) __snake_case : List[Any] = False __snake_case : int = False __snake_case : Optional[int] = False __snake_case : int = False __snake_case : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = TFCvtModelTester(self ) SCREAMING_SNAKE_CASE = TFCvtConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="""Cvt does not output attentions""" ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or 
len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,) @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(lowerCamelCase__ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: '''simple docstring''' def check_hidden_states_output(lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] ): SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = len(self.model_tester.depth ) self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) ,[ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] ,) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = TFCvtModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def __lowercase ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow 
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase__ ,return_tensors="""tf""" ) # forward pass SCREAMING_SNAKE_CASE = model(**lowerCamelCase__ ) # verify the logits SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,lowerCamelCase__ ,atol=1e-4 ) )
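# --- Illustrative check (assumption): the spatial-size formula used by the CvT tester above ---
# Each stage's output resolution is recomputed with the standard convolution formula
# floor((size + 2 * padding - kernel) / stride + 1). For the first stage of the tester
# (64x64 images, kernel 7, stride 4, padding 2) this gives the 64 // 4 = 16 used in the
# hidden-state shape check:
from math import floor


def conv_out_size(size: int, padding: int, kernel: int, stride: int) -> int:
    return floor((size + 2 * padding - kernel) / stride + 1)


assert conv_out_size(64, padding=2, kernel=7, stride=4) == 16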
296
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """Return the raw string value of an environment variable, falling back to `default`."""
    value = os.environ.get(key, str(default))
    return value
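# --- Illustrative usage (assumption: the reconstructed helper names above; the variable
# names WORLD_SIZE / USE_FP16 / MIXED_PRECISION are only examples) ---
import os

os.environ["WORLD_SIZE"] = "4"
os.environ["USE_FP16"] = "true"

assert get_int_from_env(["NPROC", "WORLD_SIZE"], default=1) == 4
assert parse_flag_from_env("USE_FP16", default=False) is True
assert parse_choice_from_env("MIXED_PRECISION", default="no") == "no"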
296
1
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Tuple = CanineTokenizer __snake_case : Dict = False def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' return CanineTokenizer.from_pretrained("""google/canine-s""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,**lowerCamelCase__ : List[Any] ) -> CanineTokenizer: '''simple docstring''' SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 1024 return tokenizer @require_torch def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.canine_tokenizer SCREAMING_SNAKE_CASE = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off SCREAMING_SNAKE_CASE = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0] # fmt: on SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors="""pt""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) self.assertEqual((2, 39) ,batch.input_ids.shape ) self.assertEqual((2, 39) ,batch.attention_mask.shape ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.canine_tokenizer SCREAMING_SNAKE_CASE = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" ,lowerCamelCase__ ) self.assertIn("""attention_mask""" ,lowerCamelCase__ ) self.assertIn("""token_type_ids""" ,lowerCamelCase__ ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.canine_tokenizer SCREAMING_SNAKE_CASE = [ """What's the weater?""", """It's about 25 degrees.""", ] SCREAMING_SNAKE_CASE = tokenizer( text_target=lowerCamelCase__ ,max_length=32 ,padding="""max_length""" ,truncation=lowerCamelCase__ ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length ,42 ) # Now let's start the test SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE = 
tempfile.mkdtemp() SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running""" SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = after_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) shutil.rmtree(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running""" SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: SCREAMING_SNAKE_CASE = chr(0Xe_007 ) additional_special_tokens.append(lowerCamelCase__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = after_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) self.assertIn(lowerCamelCase__ ,after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length ,42 ) SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(lowerCamelCase__ ,model_max_length=43 ) self.assertEqual(tokenizer.model_max_length ,43 ) shutil.rmtree(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.get_clean_sequence(lowerCamelCase__ ) # a special token for Canine can be defined as follows: SCREAMING_SNAKE_CASE = 0Xe_005 SCREAMING_SNAKE_CASE = chr(lowerCamelCase__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) ,1 ) SCREAMING_SNAKE_CASE = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ ,input_encoded + special_token_id ) SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): SCREAMING_SNAKE_CASE = chr(0Xe_005 ) SCREAMING_SNAKE_CASE = chr(0Xe_006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=lowerCamelCase__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) ,1 ) self.assertEqual(len(lowerCamelCase__ ) ,1 ) self.assertEqual(token_a[0] ,lowerCamelCase__ ) self.assertEqual(token_a[0] ,lowerCamelCase__ ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # a special token for Canine can be defined as follows: SCREAMING_SNAKE_CASE = 0Xe_006 SCREAMING_SNAKE_CASE = chr(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCamelCase__ ) tokenizer.from_pretrained(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""special_tokens_map.json""" ) ,encoding="""utf-8""" ) as json_file: SCREAMING_SNAKE_CASE = json.load(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""tokenizer_config.json""" ) ,encoding="""utf-8""" ) as json_file: SCREAMING_SNAKE_CASE = json.load(lowerCamelCase__ ) # a special token for Canine can be defined as follows: SCREAMING_SNAKE_CASE = 0Xe_006 SCREAMING_SNAKE_CASE = chr(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [new_token_a] SCREAMING_SNAKE_CASE = [new_token_a] with open(os.path.join(lowerCamelCase__ ,"""special_tokens_map.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile: json.dump(lowerCamelCase__ ,lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""tokenizer_config.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile: json.dump(lowerCamelCase__ ,lowerCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(lowerCamelCase__ ,extra_ids=0 ) self.assertIn(lowerCamelCase__ ,tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,) SCREAMING_SNAKE_CASE = 0Xe_007 SCREAMING_SNAKE_CASE = chr(lowerCamelCase__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained SCREAMING_SNAKE_CASE = [AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ )] SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained( lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,extra_ids=0 ) self.assertIn(lowerCamelCase__ ,tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): SCREAMING_SNAKE_CASE = """hello world""" if self.space_between_special_tokens: SCREAMING_SNAKE_CASE = """[CLS] hello world [SEP]""" else: SCREAMING_SNAKE_CASE = input SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCamelCase__ ,spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCamelCase__ ,[output, output.lower()] ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): SCREAMING_SNAKE_CASE = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] SCREAMING_SNAKE_CASE = """a""" SCREAMING_SNAKE_CASE = ord(lowerCamelCase__ ) for attr in attributes_list: setattr(lowerCamelCase__ ,attr + """_id""" ,lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ ,attr + """_id""" ) ,lowerCamelCase__ ) setattr(lowerCamelCase__ ,attr + """_id""" ,lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ ,attr + """_id""" ) ,lowerCamelCase__ ) setattr(lowerCamelCase__ ,"""additional_special_tokens_ids""" ,[] ) self.assertListEqual(getattr(lowerCamelCase__ ,"""additional_special_tokens""" ) ,[] ) self.assertListEqual(getattr(lowerCamelCase__ ,"""additional_special_tokens_ids""" ) ,[] ) SCREAMING_SNAKE_CASE = 0Xe_006 SCREAMING_SNAKE_CASE = chr(lowerCamelCase__ ) setattr(lowerCamelCase__ ,"""additional_special_tokens_ids""" ,[additional_special_token_id] ) self.assertListEqual(getattr(lowerCamelCase__ ,"""additional_special_tokens""" ) ,[additional_special_token] ) self.assertListEqual(getattr(lowerCamelCase__ ,"""additional_special_tokens_ids""" ) 
,[additional_special_token_id] ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' pass
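# --- Illustrative note (assumption): CANINE token ids are raw Unicode code points ---
# The expected ids in the batch-integration test above are just ord() of each character,
# wrapped in private-use code points for [CLS] (57344 = 0xE000) and [SEP] (57345 = 0xE001)
# and padded with 0:
text = "Life is like a box of chocolates."
ids = [57344] + [ord(ch) for ch in text] + [57345]
assert ids[:6] == [57344, 76, 105, 102, 101, 32]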
296
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __snake_case : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __snake_case : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Use FP16 to accelerate inference."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark training of model"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Verbose memory tracing"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Trace memory line by line"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save result to a CSV file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save all print statements in a log file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to print environment information"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __snake_case : str = field( default=F"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) __snake_case : str = field( default=F"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __snake_case : str = field( default=F"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __snake_case : str = field( default=F"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __snake_case : str = field( default=F"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) __snake_case : str = field( default=F"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) __snake_case : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" ,lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
296
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCamelCase__ : '''simple docstring''' def __init__( self : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Tuple=13 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Any=24 ,lowerCamelCase__ : int=16 ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : Any=4 ,lowerCamelCase__ : List[Any]=37 ,lowerCamelCase__ : Optional[int]="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : str=10 ,lowerCamelCase__ : int=0.02 ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : int=2 ,) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = max_length SCREAMING_SNAKE_CASE = num_mel_bins SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = frequency_stride SCREAMING_SNAKE_CASE = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) SCREAMING_SNAKE_CASE = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 SCREAMING_SNAKE_CASE = (self.max_length - self.patch_size) // self.time_stride + 1 SCREAMING_SNAKE_CASE = frequency_out_dimension * time_out_dimension SCREAMING_SNAKE_CASE = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = self.get_config() return config, input_values, labels def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' return ASTConfig( patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob 
,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[int] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ) = config_and_inputs SCREAMING_SNAKE_CASE = {"""input_values""": input_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Optional[Any] = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) __snake_case : Optional[Any] = ( {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel} if is_torch_available() else {} ) __snake_case : Tuple = False __snake_case : List[str] = False __snake_case : Optional[Any] = False __snake_case : Optional[Any] = False def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ) -> Tuple: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = ASTModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""AST does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["""input_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: '''simple 
docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def __lowercase ( ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = torchaudio.load(_SCREAMING_SNAKE_CASE ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ) if is_torchaudio_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.default_feature_extractor SCREAMING_SNAKE_CASE = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.default_feature_extractor SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = prepare_audio() SCREAMING_SNAKE_CASE = audio.squeeze().numpy() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,return_tensors="""pt""" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**lowerCamelCase__ ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
296
import math
import unittest


def is_prime(number: int) -> bool:
    """Check whether a non-negative integer is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class UpperCamelCase__(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        # Negative inputs violate the assertion inside is_prime.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
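A minimal usage sketch, added for illustration and not part of the original snippet; it assumes the checker is callable as is_prime (the name the tests above already use).

# List the primes below 50 with the checker defined above.
primes_below_50 = [n for n in range(50) if is_prime(n)]
print(primes_below_50)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]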
296
1
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = "efficientformer" def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = mlp_expansion_ratio SCREAMING_SNAKE_CASE = downsamples SCREAMING_SNAKE_CASE = dim SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = resolution SCREAMING_SNAKE_CASE = pool_size SCREAMING_SNAKE_CASE = downsample_patch_size SCREAMING_SNAKE_CASE = downsample_stride SCREAMING_SNAKE_CASE = downsample_pad SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = num_metaad_blocks SCREAMING_SNAKE_CASE = distillation SCREAMING_SNAKE_CASE = use_layer_scale SCREAMING_SNAKE_CASE = layer_scale_init_value SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = batch_norm_eps
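The mangled signature above gives every parameter the same name, so the class as printed will not instantiate; against the upstream transformers API (assuming a release that ships EfficientFormer), the equivalent usage is roughly:

from transformers import EfficientFormerConfig

config = EfficientFormerConfig()   # defaults mirror the values in the signature above
print(config.hidden_sizes)         # [48, 96, 224, 448]
print(config.to_json_string())     # PretrainedConfig handles JSON serialization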
296
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt the text by pairing every character with a fresh random key value."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from the cipher values and the matching key stream."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
296
1
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any]=3 ,lowerCamelCase__ : str=32 ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : Dict=10 ,lowerCamelCase__ : Optional[Any]=[10, 20, 30, 40] ,lowerCamelCase__ : Dict=[1, 1, 2, 1] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : List[str]="relu" ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Union[str, Any]=None ,) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embeddings_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxRegNetModel(config=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification(config=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Optional[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __snake_case : int = False __snake_case : 
Optional[Any] = False __snake_case : List[str] = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxRegNetModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' return def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' def check_hidden_states_output(lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ): SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase__ ) ,expected_num_stages + 1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE = 
self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) @jax.jit def model_jitted(lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ): return model(pixel_values=lowerCamelCase__ ,**lowerCamelCase__ ) with self.subTest("""JIT Enabled""" ): SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ) for jitted_output, output in zip(lowerCamelCase__ ,lowerCamelCase__ ): self.assertEqual(jitted_output.shape ,output.shape ) def __lowercase ( ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase__ ,return_tensors="""np""" ) SCREAMING_SNAKE_CASE = model(**lowerCamelCase__ ) # verify the logits SCREAMING_SNAKE_CASE = (1, 1000) self.assertEqual(outputs.logits.shape ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
296
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = "efficientformer" def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = mlp_expansion_ratio SCREAMING_SNAKE_CASE = downsamples SCREAMING_SNAKE_CASE = dim SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = resolution SCREAMING_SNAKE_CASE = pool_size SCREAMING_SNAKE_CASE = downsample_patch_size SCREAMING_SNAKE_CASE = downsample_stride SCREAMING_SNAKE_CASE = downsample_pad SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = num_metaad_blocks SCREAMING_SNAKE_CASE = distillation SCREAMING_SNAKE_CASE = use_layer_scale SCREAMING_SNAKE_CASE = layer_scale_init_value SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = batch_norm_eps
296
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[int] = "vision-encoder-decoder" __snake_case : Union[str, Any] = True def __init__( self : List[Any] ,**lowerCamelCase__ : Dict ) -> Any: '''simple docstring''' super().__init__(**lowerCamelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F"""A configuraton of type {self.model_type} cannot be instantiated because """ F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) SCREAMING_SNAKE_CASE = kwargs.pop("""encoder""" ) SCREAMING_SNAKE_CASE = encoder_config.pop("""model_type""" ) SCREAMING_SNAKE_CASE = kwargs.pop("""decoder""" ) SCREAMING_SNAKE_CASE = decoder_config.pop("""model_type""" ) SCREAMING_SNAKE_CASE = AutoConfig.for_model(lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.for_model(lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] ,lowerCamelCase__ : PretrainedConfig ,lowerCamelCase__ : PretrainedConfig ,**lowerCamelCase__ : Tuple ) -> PretrainedConfig: '''simple docstring''' logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.encoder.to_dict() SCREAMING_SNAKE_CASE = self.decoder.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> float: '''simple docstring''' return 1e-4 @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' SCREAMING_SNAKE_CASE = OrderedDict() SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""} SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""} SCREAMING_SNAKE_CASE = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : "PreTrainedTokenizerBase" ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional["TensorType"] = None ,) -> Mapping[str, Any]: '''simple docstring''' import torch SCREAMING_SNAKE_CASE = OrderedDict() 
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs( lowerCamelCase__ ,batch_size=lowerCamelCase__ ,seq_length=lowerCamelCase__ ,is_pair=lowerCamelCase__ ,framework=lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_input["""input_ids"""].shape SCREAMING_SNAKE_CASE = (batch, encoder_sequence, self._config.encoder_hidden_size) SCREAMING_SNAKE_CASE = dummy_input.pop("""input_ids""" ) SCREAMING_SNAKE_CASE = dummy_input.pop("""attention_mask""" ) SCREAMING_SNAKE_CASE = torch.zeros(lowerCamelCase__ ) return common_inputs class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> None: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : PretrainedConfig ) -> OnnxConfig: '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : PretrainedConfig ,lowerCamelCase__ : PretrainedConfig ,lowerCamelCase__ : str = "default" ) -> OnnxConfig: '''simple docstring''' SCREAMING_SNAKE_CASE = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(lowerCamelCase__ ,lowerCamelCase__ )
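For context, a brief sketch of how this composite configuration is normally built through the public transformers API; the mangled class above carries the model_type "vision-encoder-decoder", which corresponds to VisionEncoderDecoderConfig upstream (a working transformers install is assumed):

from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

# Build the composite config from an image-encoder config and a text-decoder config;
# this mirrors the classmethod above that flips is_decoder and add_cross_attention on the decoder.
encoder_config = ViTConfig()
decoder_config = GPT2Config()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True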
296
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' if "://" in dataset_path: SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1] return dataset_path def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool: '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) ) else: fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE ) def __lowercase ( ) -> None: '''simple docstring''' if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = threading.Lock()
296
1
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by the given level."""

    def brightness(c: int) -> float:
        # Fundamental per-pixel transformation that shifts brightness.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
296
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.dummy_uncond_unet SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0] SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256""" SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
296
1
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
296
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]: '''simple docstring''' super().__init__( features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = Generator( cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,) SCREAMING_SNAKE_CASE = self.builder.as_dataset( split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory ) return dataset
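The reader above appears to be the internal plumbing behind generator-based loading in the datasets library; a minimal sketch of the public entry point, assuming a datasets release where Dataset.from_generator is available:

from datasets import Dataset


def gen():
    # Yield one example per call; the schema is inferred from the dicts.
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
print(ds)  # roughly: Dataset({features: ['id', 'text'], num_rows: 3})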
296
1
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) __snake_case : Optional[str] = field( default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
296
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) __snake_case : Optional[str] = field( default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
296
1
from collections.abc import Callable class UpperCamelCase__ : '''simple docstring''' def __init__( self : List[str] ,lowerCamelCase__ : Callable | None = None ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = [] # Stores indexes of each item for supporting updates and deletion. SCREAMING_SNAKE_CASE = {} # Stores current size of heap. SCREAMING_SNAKE_CASE = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. SCREAMING_SNAKE_CASE = key or (lambda lowerCamelCase__ : x) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : int ) -> int | None: '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ) -> int | None: '''simple docstring''' SCREAMING_SNAKE_CASE = int(2 * i + 1 ) return left if 0 < left < self.size else None def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : int ) -> int | None: '''simple docstring''' SCREAMING_SNAKE_CASE = int(2 * i + 2 ) return right if 0 < right < self.size else None def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.arr[j], self.arr[i] def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> bool: '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self._left(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self._right(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = i if left is not None and not self._cmp(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = left if right is not None and not self._cmp(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = right return valid_parent def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = self._parent(lowerCamelCase__ ) while parent is not None and not self._cmp(lowerCamelCase__ ,lowerCamelCase__ ): self._swap(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = parent, self._parent(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : int ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = self._get_valid_parent(lowerCamelCase__ ) while valid_parent != index: self._swap(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = valid_parent, self._get_valid_parent(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> None: '''simple docstring''' if item not in self.pos_map: return SCREAMING_SNAKE_CASE = self.pos_map[item] SCREAMING_SNAKE_CASE = [item, self.key(lowerCamelCase__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. 
self._heapify_up(lowerCamelCase__ ) self._heapify_down(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ) -> None: '''simple docstring''' if item not in self.pos_map: return SCREAMING_SNAKE_CASE = self.pos_map[item] del self.pos_map[item] SCREAMING_SNAKE_CASE = self.arr[self.size - 1] SCREAMING_SNAKE_CASE = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(lowerCamelCase__ ) self._heapify_down(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(lowerCamelCase__ )] ) else: SCREAMING_SNAKE_CASE = [item, self.key(lowerCamelCase__ )] SCREAMING_SNAKE_CASE = self.size self.size += 1 self._heapify_up(self.size - 1 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> tuple | None: '''simple docstring''' return self.arr[0] if self.size else None def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> tuple | None: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __lowercase ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
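The method names in this heap class are mangled in the corpus (every method is shown as SCREAMING_SNAKE_CASE__, and the default key lambda references an undefined name), so it cannot be driven as printed. Assuming the conventional generic-heap API the code mirrors (insert_item, get_top, extract_top — names introduced here for illustration only), usage would look roughly like:

# Illustrative sketch under the assumed, de-mangled method names.
heap = UpperCamelCase__()        # min-heap ordered by the stored item value
heap.insert_item("a", 3)
heap.insert_item("b", 1)
heap.insert_item("c", 2)
print(heap.get_top())            # ['b', 1]
print(heap.extract_top())        # ['b', 1]; 'b' is removed from the heap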
296
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[str] = TextToVideoSDPipeline __snake_case : int = TEXT_TO_IMAGE_PARAMS __snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __snake_case : Dict = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,) SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]: '''simple docstring''' if str(lowerCamelCase__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) 
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """np""" SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
296
1
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
296
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the massive number 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
296
1
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar SCREAMING_SNAKE_CASE_ = TypeVar("""T""") class UpperCamelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self : Optional[Any] ,lowerCamelCase__ : bool = True ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = {} # dictionary of lists SCREAMING_SNAKE_CASE = directed def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : T ,lowerCamelCase__ : T ) -> GraphAdjacencyList[T]: '''simple docstring''' if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCamelCase__ ) self.adj_list[destination_vertex].append(lowerCamelCase__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: SCREAMING_SNAKE_CASE = [destination_vertex] SCREAMING_SNAKE_CASE = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCamelCase__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: SCREAMING_SNAKE_CASE = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
# Then create a new vertex with destination vertex as key, which has no adjacent vertex else: SCREAMING_SNAKE_CASE = [destination_vertex] SCREAMING_SNAKE_CASE = [] return self def __repr__( self : Tuple ) -> str: '''simple docstring''' return pformat(self.adj_list )
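# A minimal, behaviourally equivalent sketch of the adjacency-list insertion described in the
# comments above, written with descriptive names (GraphAdjacencyList / add_edge are assumed
# names for this illustration, not the identifiers used in the snippet itself):
class GraphAdjacencyList:
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[int, list[int]] = {}
        self.directed = directed

    def add_edge(self, source_vertex: int, destination_vertex: int) -> "GraphAdjacencyList":
        # create missing vertices, then record the edge (and its reverse when undirected);
        # setdefault collapses the four presence/absence cases handled explicitly above
        self.adj_list.setdefault(source_vertex, []).append(destination_vertex)
        self.adj_list.setdefault(destination_vertex, [])
        if not self.directed:
            self.adj_list[destination_vertex].append(source_vertex)
        return self


graph = GraphAdjacencyList(directed=False).add_edge(1, 2).add_edge(2, 3)
print(graph.adj_list)  # {1: [2], 2: [1, 3], 3: [2]}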
296
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
296
1
from math import factorial, radians


def __lowercase(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series of ``accuracy`` terms."""
    # Reduce the angle to the range [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("""doctest""").testmod()
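# A quick comparison of the truncated series above against math.sin (the helper keeps this
# dump's obfuscated name __lowercase); with 18 terms and 10 rounded decimal places the two
# agree for everyday angles:
from math import sin

for degrees in (0, 30, 90, 270):
    print(degrees, __lowercase(degrees), round(sin(radians(degrees)), 10))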
296
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first ``n`` lines of each file in ``src_dir`` to a file of the same name in ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("""w""").write("""\n""".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
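# Usage note: fire.Fire(minify) exposes the function above as a command-line tool whose
# positional arguments map one-to-one onto the function parameters, e.g. (paths purely illustrative):
#   python minify.py ./full_dataset ./mini_dataset 100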
296
1
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) ) SCREAMING_SNAKE_CASE = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } SCREAMING_SNAKE_CASE = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 16000, """return_attention_mask""": False, """do_normalize""": True, } SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,lowerCamelCase__ ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + """\n""" ) with open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + """\n""" ) # load decoder from hub SCREAMING_SNAKE_CASE = """hf-internal-testing/ngram-beam-search-decoder""" def SCREAMING_SNAKE_CASE__ ( self : int ,**lowerCamelCase__ : Tuple ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.add_kwargs_tokens_map.copy() kwargs.update(lowerCamelCase__ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,**lowerCamelCase__ : int ) -> Dict: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,**lowerCamelCase__ : List[Any] ) -> Any: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer 
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,lowerCamelCase__ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,lowerCamelCase__ ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(lowerCamelCase__ ,"""include""" ): WavaVecaProcessorWithLM( tokenizer=lowerCamelCase__ ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = floats_list((3, 1000) ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ) SCREAMING_SNAKE_CASE = processor(lowerCamelCase__ ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """This is a test string""" SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tuple=(2, 10, 16) ,lowerCamelCase__ : List[Any]=77 ) -> Union[str, Any]: '''simple docstring''' np.random.seed(lowerCamelCase__ ) return np.random.rand(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = 
WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) SCREAMING_SNAKE_CASE = processor.decode(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = decoder.decode_beams(lowerCamelCase__ )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase__ ) else: with get_context(lowerCamelCase__ ).Pool() as pool: SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = list(lowerCamelCase__ ) with get_context("""fork""" ).Pool() as p: SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCamelCase__ ,decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text ) self.assertListEqual(lowerCamelCase__ ,decoded_processor.logit_score ) self.assertListEqual(lowerCamelCase__ ,decoded_processor.lm_score ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self._get_dummy_logits() SCREAMING_SNAKE_CASE = 15 SCREAMING_SNAKE_CASE = -20.0 SCREAMING_SNAKE_CASE = -4.0 SCREAMING_SNAKE_CASE = processor.batch_decode( lowerCamelCase__ ,beam_width=lowerCamelCase__ ,beam_prune_logp=lowerCamelCase__ ,token_min_logp=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = decoded_processor_out.text SCREAMING_SNAKE_CASE = list(lowerCamelCase__ ) with get_context("""fork""" ).Pool() as pool: SCREAMING_SNAKE_CASE = decoder.decode_beams_batch( lowerCamelCase__ ,lowerCamelCase__ ,beam_width=lowerCamelCase__ ,beam_prune_logp=lowerCamelCase__ ,token_min_logp=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = [d[0][0] for d in decoded_decoder_out] SCREAMING_SNAKE_CASE = [d[0][2] for d in decoded_decoder_out] SCREAMING_SNAKE_CASE = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,lowerCamelCase__ ) 
self.assertTrue(np.array_equal(lowerCamelCase__ ,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] ,lowerCamelCase__ ,atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCamelCase__ ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] ,lowerCamelCase__ ,atol=1e-3 ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self._get_dummy_logits() SCREAMING_SNAKE_CASE = 2.0 SCREAMING_SNAKE_CASE = 5.0 SCREAMING_SNAKE_CASE = -20.0 SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = processor.batch_decode( lowerCamelCase__ ,alpha=lowerCamelCase__ ,beta=lowerCamelCase__ ,unk_score_offset=lowerCamelCase__ ,lm_score_boundary=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = decoded_processor_out.text SCREAMING_SNAKE_CASE = list(lowerCamelCase__ ) decoder.reset_params( alpha=lowerCamelCase__ ,beta=lowerCamelCase__ ,unk_score_offset=lowerCamelCase__ ,lm_score_boundary=lowerCamelCase__ ,) with get_context("""fork""" ).Pool() as pool: SCREAMING_SNAKE_CASE = decoder.decode_beams_batch( lowerCamelCase__ ,lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-20.0 ) self.assertEqual(lm_model.score_boundary ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key] SCREAMING_SNAKE_CASE = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() SCREAMING_SNAKE_CASE = os.listdir(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = snapshot_download("""hf-internal-testing/processor_with_lm""" ) SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key] SCREAMING_SNAKE_CASE = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() SCREAMING_SNAKE_CASE = os.listdir(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = os.listdir(lowerCamelCase__ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) SCREAMING_SNAKE_CASE = floats_list((3, 1000) ) SCREAMING_SNAKE_CASE = processor_wavaveca(lowerCamelCase__ ,return_tensors="""np""" ) SCREAMING_SNAKE_CASE = processor_auto(lowerCamelCase__ ,return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) SCREAMING_SNAKE_CASE = self._get_dummy_logits() SCREAMING_SNAKE_CASE = processor_wavaveca.batch_decode(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = processor_auto.batch_decode(lowerCamelCase__ ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_feature_extractor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_decoder() SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,decoder=lowerCamelCase__ ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,) @staticmethod def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = [d[key] for d in offsets] return retrieved_list def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) SCREAMING_SNAKE_CASE = self._get_dummy_logits()[0] SCREAMING_SNAKE_CASE = processor.decode(lowerCamelCase__ ,output_word_offsets=lowerCamelCase__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 
WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) SCREAMING_SNAKE_CASE = self._get_dummy_logits() SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase__ ,output_word_offsets=lowerCamelCase__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(lowerCamelCase__ ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' import torch SCREAMING_SNAKE_CASE = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16000 ) ) SCREAMING_SNAKE_CASE = iter(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = next(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) SCREAMING_SNAKE_CASE = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train SCREAMING_SNAKE_CASE = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values with torch.no_grad(): SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ).logits.cpu().numpy() SCREAMING_SNAKE_CASE = processor.decode(logits[0] ,output_word_offsets=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate SCREAMING_SNAKE_CASE = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] SCREAMING_SNAKE_CASE = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase__ ,"""word""" ) ) ,lowerCamelCase__ ) self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase__ ,"""word""" ) ) ,output.text ) # output times SCREAMING_SNAKE_CASE = torch.tensor(self.get_from_offsets(lowerCamelCase__ ,"""start_time""" ) ) SCREAMING_SNAKE_CASE = torch.tensor(self.get_from_offsets(lowerCamelCase__ ,"""end_time""" ) ) # fmt: off SCREAMING_SNAKE_CASE = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) SCREAMING_SNAKE_CASE = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=0.01 ) ) self.assertTrue(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=0.01 ) )
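# A condensed sketch of the multiprocessing decode pattern exercised in the tests above. As the
# in-test comment notes, the Pool must be created *after* the processor so the decoder's language
# model is visible to the worker processes; the random logits below merely stand in for a real
# Wav2Vec2ForCTC forward pass, and the checkpoint name matches the one used in the slow test.
import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""")
vocab_size = len(processor.tokenizer)
logits = np.log(np.random.dirichlet(np.ones(vocab_size), size=(2, 100)))  # (batch, frames, vocab)
with get_context("""fork""").Pool() as pool:  # instantiated after the processor, as required
    transcriptions = processor.batch_decode(logits, pool).text
print(transcriptions)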
296
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
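# A standalone sketch of the registration round-trip the tests above exercise (MyConfig and
# "my-model" are illustrative names, not part of the Transformers library):
from transformers import AutoConfig, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = """my-model"""


AutoConfig.register("""my-model""", MyConfig)  # the key must match MyConfig.model_type
config = MyConfig(hidden_size=128)
config.save_pretrained("""my-model-dir""")
reloaded = AutoConfig.from_pretrained("""my-model-dir""")
assert isinstance(reloaded, MyConfig)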
296
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = """▁""" SCREAMING_SNAKE_CASE_ = {"""vocab_file""": """sentencepiece.bpe.model"""} SCREAMING_SNAKE_CASE_ = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), } } SCREAMING_SNAKE_CASE_ = { """facebook/mbart-large-en-ro""": 1_0_2_4, """facebook/mbart-large-cc25""": 1_0_2_4, } # fmt: off SCREAMING_SNAKE_CASE_ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[Any] = VOCAB_FILES_NAMES __snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case : Any = PRETRAINED_VOCAB_FILES_MAP __snake_case : Optional[Any] = ["input_ids", "attention_mask"] __snake_case : List[int] = [] __snake_case : List[int] = [] def __init__( self : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any="<s>" ,lowerCamelCase__ : Optional[int]="</s>" ,lowerCamelCase__ : Optional[Any]="</s>" ,lowerCamelCase__ : Optional[Any]="<s>" ,lowerCamelCase__ : Dict="<unk>" ,lowerCamelCase__ : List[str]="<pad>" ,lowerCamelCase__ : Any="<mask>" ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : int=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Optional[Dict[str, Any]] = None ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : str ,) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = len(self.sp_model ) SCREAMING_SNAKE_CASE = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase__ ) } SCREAMING_SNAKE_CASE = {v: k for k, v in self.lang_code_to_id.items()} SCREAMING_SNAKE_CASE = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()} SCREAMING_SNAKE_CASE = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """en_XX""" SCREAMING_SNAKE_CASE = self.lang_code_to_id[self._src_lang] SCREAMING_SNAKE_CASE = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] ,lowerCamelCase__ : Any ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : str ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [1] * len(self.prefix_tokens ) SCREAMING_SNAKE_CASE = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ 
: Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] ,**lowerCamelCase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) SCREAMING_SNAKE_CASE = src_lang SCREAMING_SNAKE_CASE = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCamelCase__ ,out_type=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ) -> Tuple: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(lowerCamelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : Optional[Any] ) -> str: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Dict ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase__ ).replace(lowerCamelCase__ ,""" """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCamelCase__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase__ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ ,"""wb""" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str = "en_XX" ,lowerCamelCase__ : Optional[List[str]] = None ,lowerCamelCase__ : str = "ro_RO" ,**lowerCamelCase__ : List[str] ,) -> BatchEncoding: '''simple docstring''' SCREAMING_SNAKE_CASE = src_lang SCREAMING_SNAKE_CASE = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' return 
self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Union[str, Any] ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = self.lang_code_to_id[src_lang] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code] def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : str ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = self.lang_code_to_id[lang] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
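# A small illustration of the fairseq/SentencePiece alignment documented above: the spm model
# reserves ids 0-2 for <unk>/<s>/</s>, while the fairseq vocabulary starts <s>=0, <pad>=1,
# </s>=2, <unk>=3, so ordinary piece ids are shifted by fairseq_offset = 1 and the specials are
# looked up in an explicit table (the helper name piece_to_fairseq_id is illustrative):
fairseq_special = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}


def piece_to_fairseq_id(spm_id: int, fairseq_offset: int = 1) -> int:
    # PieceToId returns 0 for unknown pieces, which must map to the fairseq <unk> id
    return spm_id + fairseq_offset if spm_id else fairseq_special["""<unk>"""]


print(piece_to_fairseq_id(10))  # 11
print(piece_to_fairseq_id(0))   # 3  (<unk>)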
296
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git_vision_model" def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git" def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]: '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = num_image_with_embedding SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
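# A short usage sketch of the two configuration classes above: a plain dict passed as
# vision_config is converted to a GitVisionConfig inside GitConfig.__init__ (values illustrative):
from transformers import GitConfig

config = GitConfig(vision_config={"""hidden_size""": 512, """num_hidden_layers""": 6})
print(config.vision_config.hidden_size)                          # 512
print(config.to_dict()["""vision_config"""]["""hidden_size"""])  # 512, via the to_dict override above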
296
1
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint SCREAMING_SNAKE_CASE_ = { """169M""": 1_2, """430M""": 2_4, """1B5""": 2_4, """3B""": 3_2, """7B""": 3_2, """14B""": 4_0, } SCREAMING_SNAKE_CASE_ = { """169M""": 7_6_8, """430M""": 1_0_2_4, """1B5""": 2_0_4_8, """3B""": 2_5_6_0, """7B""": 4_0_9_6, """14B""": 5_1_2_0, } def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = list(state_dict.keys() ) for name in state_dict_keys: SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE ) # emb -> embedding if name.startswith("""emb.""" ): SCREAMING_SNAKE_CASE = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): SCREAMING_SNAKE_CASE = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention SCREAMING_SNAKE_CASE = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , _SCREAMING_SNAKE_CASE ) # ffn -> feed_forward SCREAMING_SNAKE_CASE = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , _SCREAMING_SNAKE_CASE ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): SCREAMING_SNAKE_CASE = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): SCREAMING_SNAKE_CASE = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): SCREAMING_SNAKE_CASE = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": SCREAMING_SNAKE_CASE = """rwkv.""" + name SCREAMING_SNAKE_CASE = weight return state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: '''simple docstring''' if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) SCREAMING_SNAKE_CASE = 5_02_77 SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: SCREAMING_SNAKE_CASE = PreTrainedTokenizerFast(tokenizer_file=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) # 2. Build the config SCREAMING_SNAKE_CASE = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: SCREAMING_SNAKE_CASE = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) SCREAMING_SNAKE_CASE = RwkvConfig( vocab_size=_SCREAMING_SNAKE_CASE , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(_SCREAMING_SNAKE_CASE ) # 3. 
Download model file then convert state_dict SCREAMING_SNAKE_CASE = hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE ) # 4. Split in shards and save SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = shard_checkpoint(_SCREAMING_SNAKE_CASE ) for shard_file, shard in shards.items(): torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) if index is not None: SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save the index as well with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + """\n""" f.write(_SCREAMING_SNAKE_CASE ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" ) SCREAMING_SNAKE_CASE = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: SCREAMING_SNAKE_CASE = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE , max_shard_size="""2GB""" ) tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint.""" ) parser.add_argument( """--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo.""" ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model.""" ) parser.add_argument( """--tokenizer_file""", default=None, type=str, help="""Path to the tokenizer file to use (if not provided, only the model is converted).""", ) parser.add_argument( """--size""", default=None, type=str, help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Push to the Hub the converted model.""", ) parser.add_argument( """--model_name""", default=None, type=str, help="""Name of the pushed model on the Hub, including the username / organization.""", ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
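# A small illustration of the key-renaming scheme applied by the state-dict converter above,
# shown on a few hypothetical checkpoint keys (rename() is a stripped-down restatement of the
# rules for illustration, not a new helper in the script):
import re


def rename(name: str) -> str:
    if name.startswith("""emb."""):
        name = name.replace("""emb.""", """embeddings.""")
    name = re.sub(r"""blocks\.(\d+)\.att""", r"""blocks.\1.attention""", name)
    name = re.sub(r"""blocks\.(\d+)\.ffn""", r"""blocks.\1.feed_forward""", name)
    if name != """head.weight""":
        name = """rwkv.""" + name
    return name


for key in ("""emb.weight""", """blocks.3.att.key.weight""", """blocks.3.ffn.value.weight""", """head.weight"""):
    print(key, """->""", rename(key))
# emb.weight -> rwkv.embeddings.weight
# blocks.3.att.key.weight -> rwkv.blocks.3.attention.key.weight
# blocks.3.ffn.value.weight -> rwkv.blocks.3.feed_forward.value.weight
# head.weight -> head.weight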
296
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = spectrogram_length SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = num_audio_channels SCREAMING_SNAKE_CASE = hop_length SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = sampling_rate def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str: '''simple docstring''' def _flatten(lowerCamelCase__ : List[Any] ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[Any] = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) ) 
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE = feature_extractor( lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = TvltFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
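# A minimal usage sketch of the feature extractor exercised by the tests above.
# The 44100 Hz rate and the random one-second waveform are illustrative assumptions;
# the call signature mirrors the batched test (return_tensors="np", .audio_values).
import numpy as np
from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)
features = feature_extractor(waveform, sampling_rate=44100, return_tensors="np")
# shape: (batch, num_audio_channels, <= spectrogram_length, feature_size)
print(features.audio_values.shape)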
296
1
import cva import numpy as np class UpperCamelCase__ : '''simple docstring''' def __init__( self : List[str] ,lowerCamelCase__ : float ,lowerCamelCase__ : int ) -> List[Any]: '''simple docstring''' if k in (0.04, 0.06): SCREAMING_SNAKE_CASE = k SCREAMING_SNAKE_CASE = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : List[Any] ) -> str: '''simple docstring''' return str(self.k ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: '''simple docstring''' SCREAMING_SNAKE_CASE = cva.imread(lowerCamelCase__ ,0 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = img.shape SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = img.copy() SCREAMING_SNAKE_CASE = cva.cvtColor(lowerCamelCase__ ,cva.COLOR_GRAY2RGB ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = np.gradient(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = dx**2 SCREAMING_SNAKE_CASE = dy**2 SCREAMING_SNAKE_CASE = dx * dy SCREAMING_SNAKE_CASE = 0.04 SCREAMING_SNAKE_CASE = self.window_size // 2 for y in range(lowerCamelCase__ ,h - offset ): for x in range(lowerCamelCase__ ,w - offset ): SCREAMING_SNAKE_CASE = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE = (wxx * wyy) - (wxy**2) SCREAMING_SNAKE_CASE = wxx + wyy SCREAMING_SNAKE_CASE = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,255 ) return color_img, corner_list if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = HarrisCorner(0.04, 3) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
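# An alternative sketch using OpenCV's built-in detector: cv2.cornerHarris computes the
# same response map R = det(M) - k * trace(M)^2 that the window loop above builds by hand.
# "path_to_image" is the same placeholder path; blockSize=3, ksize=3, and the 0.01 threshold
# are illustrative choices.
import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0).astype(np.float32)
response = cv2.cornerHarris(gray, 3, 3, 0.04)  # blockSize, Sobel ksize, k
corners = np.argwhere(response > 0.01 * response.max())  # (y, x) candidates
print(len(corners), "corner candidates")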
296
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 
7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
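# A usage sketch for a converted checkpoint, assuming it was pushed under the
# "apple/mobilevit-small" hub id used in the mapping above and that the installed
# transformers version ships MobileViTImageProcessor.
import requests
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])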
296
1
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = SwinConfig() SCREAMING_SNAKE_CASE = swin_name.split("""_""" ) SCREAMING_SNAKE_CASE = name_split[1] SCREAMING_SNAKE_CASE = int(name_split[4] ) SCREAMING_SNAKE_CASE = int(name_split[3][-1] ) if model_size == "tiny": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 6, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "small": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "base": SCREAMING_SNAKE_CASE = 1_28 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (4, 8, 16, 32) else: SCREAMING_SNAKE_CASE = 1_92 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (6, 12, 24, 48) if "in22k" in swin_name: SCREAMING_SNAKE_CASE = 2_18_41 else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = img_size SCREAMING_SNAKE_CASE = num_classes SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = window_size return config def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: SCREAMING_SNAKE_CASE = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "norm.bias": SCREAMING_SNAKE_CASE = """layernorm.bias""" if "head" in name: SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" ) else: SCREAMING_SNAKE_CASE = """swin.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[1] ) SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] 
SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[ :dim ] SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE = val[ -dim: ] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
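# A small illustration of the qkv split performed in the conversion above: timm stores
# query/key/value as one fused projection of size 3 * dim, which is sliced into three
# separate HF weights. dim=8 is an illustrative toy size.
import torch

dim = 8
fused_qkv = torch.randn(3 * dim, dim)           # timm layout: [q; k; v] stacked along dim 0
query_w = fused_qkv[:dim, :]
key_w = fused_qkv[dim : dim * 2, :]
value_w = fused_qkv[-dim:, :]
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused_qkv)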
296
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence 
language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
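# A usage sketch for the auto classes registered above; "bert-base-uncased" is an
# illustrative checkpoint, the flax/jax extras must be installed, and the sequence
# classification head is freshly initialised rather than pretrained.
from transformers import AutoTokenizer, FlaxAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
batch = tokenizer("the mapping resolves this checkpoint to FlaxBertForSequenceClassification", return_tensors="np")
print(model(**batch).logits.shape)  # (1, 2)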
296
1
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[Any] = "lxmert" __snake_case : str = {} def __init__( self : Tuple ,lowerCamelCase__ : int=30522 ,lowerCamelCase__ : List[Any]=768 ,lowerCamelCase__ : int=12 ,lowerCamelCase__ : int=9500 ,lowerCamelCase__ : str=1600 ,lowerCamelCase__ : Union[str, Any]=400 ,lowerCamelCase__ : Optional[Any]=3072 ,lowerCamelCase__ : Optional[int]="gelu" ,lowerCamelCase__ : str=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : Dict=512 ,lowerCamelCase__ : Union[str, Any]=2 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : Optional[Any]=1e-1_2 ,lowerCamelCase__ : int=9 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Any=2048 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : Optional[int]=6.67 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Optional[Any]=True ,**lowerCamelCase__ : List[str] ,) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = num_qa_labels SCREAMING_SNAKE_CASE = num_object_labels SCREAMING_SNAKE_CASE = num_attr_labels SCREAMING_SNAKE_CASE = l_layers SCREAMING_SNAKE_CASE = x_layers SCREAMING_SNAKE_CASE = r_layers SCREAMING_SNAKE_CASE = visual_feat_dim SCREAMING_SNAKE_CASE = visual_pos_dim SCREAMING_SNAKE_CASE = visual_loss_normalizer SCREAMING_SNAKE_CASE = task_matched SCREAMING_SNAKE_CASE = task_mask_lm SCREAMING_SNAKE_CASE = task_obj_predict SCREAMING_SNAKE_CASE = task_qa SCREAMING_SNAKE_CASE = visual_obj_loss SCREAMING_SNAKE_CASE = visual_attr_loss SCREAMING_SNAKE_CASE = visual_feat_loss SCREAMING_SNAKE_CASE = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers} super().__init__(**lowerCamelCase__ )
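# A construction sketch for the config above; the reduced sizes are illustrative
# assumptions chosen only to keep the example model small.
from transformers import LxmertConfig, LxmertModel

config = LxmertConfig(hidden_size=128, num_attention_heads=4, l_layers=2, x_layers=1, r_layers=1)
model = LxmertModel(config)
print(config.num_hidden_layers)  # {'vision': 1, 'cross_encoder': 1, 'language': 2}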
296
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "table-transformer" __snake_case : Union[str, Any] = ["past_key_values"] __snake_case : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) # set timm attributes to None SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.d_model class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' return 12
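# A sketch of the attribute_map defined above: generic code can read hidden_size and
# num_attention_heads even though the config stores d_model and encoder_attention_heads.
# The reduced sizes are illustrative, not the table-detection defaults.
from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=64, encoder_attention_heads=4, encoder_layers=2, decoder_layers=2)
print(config.hidden_size)          # 64, resolved from d_model
print(config.num_attention_heads)  # 4, resolved from encoder_attention_heads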
296
1
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' pass @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = ort.SessionOptions() SCREAMING_SNAKE_CASE = False return options def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" ,revision="""onnx""" ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE = pipe( prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase__ ,output_type="""np""" ,) SCREAMING_SNAKE_CASE = output.images SCREAMING_SNAKE_CASE = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" ,subfolder="""scheduler""" ,revision="""onnx""" ) SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" ,revision="""onnx""" ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE = pipe( prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,guidance_scale=7.5 
,num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""np""" ,) SCREAMING_SNAKE_CASE = output.images SCREAMING_SNAKE_CASE = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
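# A standalone sketch mirroring the first integration test above; it assumes the onnx
# revision of runwayml/stable-diffusion-inpainting is available and that onnxruntime is
# installed (CPUExecutionProvider is used here instead of the CUDA provider).
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image

base = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint"
init_image = load_image(f"{base}/overture-creations-5sI6fQgYIuo.png")
mask_image = load_image(f"{base}/overture-creations-5sI6fQgYIuo_mask.png")
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider",
    safety_checker=None, feature_extractor=None,
)
output = pipe(prompt="A red cat sitting on a park bench", image=init_image, mask_image=mask_image,
              guidance_scale=7.5, num_inference_steps=10, generator=np.random.RandomState(0), output_type="np")
print(output.images.shape)  # (1, 512, 512, 3)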
296
from collections import defaultdict from math import gcd def __lowercase ( _SCREAMING_SNAKE_CASE = 1_50_00_00 ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = defaultdict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _SCREAMING_SNAKE_CASE , 2 ): if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) > 1: continue SCREAMING_SNAKE_CASE = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_SCREAMING_SNAKE_CASE , limit + 1 , _SCREAMING_SNAKE_CASE ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'''{solution() = }''')
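# A worked instance of the Euclid parametrisation relied on above: coprime m > n of
# opposite parity give the primitive triple (m*m - n*n, 2*m*n, m*m + n*n), whose
# perimeter is 2*m*(m + n). m=2, n=1 yields the 3-4-5 triangle with perimeter 12.
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)
print(a, b, c, a + b + c)  # 3 4 5 12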
296
1
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE_ = TypeVar("""KEY""") SCREAMING_SNAKE_CASE_ = TypeVar("""VAL""") @dataclass(frozen=lowerCAmelCase_ , slots=lowerCAmelCase_ ) class UpperCamelCase__ ( Generic[KEY, VAL] ): '''simple docstring''' __snake_case : KEY __snake_case : VAL class UpperCamelCase__ ( _Item ): '''simple docstring''' def __init__( self : List[str] ) -> None: '''simple docstring''' super().__init__(lowerCamelCase__ ,lowerCamelCase__ ) def __bool__( self : List[Any] ) -> bool: '''simple docstring''' return False SCREAMING_SNAKE_CASE_ = _DeletedItem() class UpperCamelCase__ ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self : Dict ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : float = 0.75 ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = initial_block_size SCREAMING_SNAKE_CASE = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 SCREAMING_SNAKE_CASE = capacity_factor SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : KEY ) -> int: '''simple docstring''' return hash(lowerCamelCase__ ) % len(self._buckets ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : int ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : KEY ,lowerCamelCase__ : VAL ) -> bool: '''simple docstring''' SCREAMING_SNAKE_CASE = self._buckets[ind] if not stored: SCREAMING_SNAKE_CASE = _Item(lowerCamelCase__ ,lowerCamelCase__ ) self._len += 1 return True elif stored.key == key: SCREAMING_SNAKE_CASE = _Item(lowerCamelCase__ ,lowerCamelCase__ ) return True else: return False def SCREAMING_SNAKE_CASE__ ( self : str ) -> bool: '''simple docstring''' SCREAMING_SNAKE_CASE = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False SCREAMING_SNAKE_CASE = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : int ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = self._buckets SCREAMING_SNAKE_CASE = [None] * new_size SCREAMING_SNAKE_CASE = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : KEY ) -> Iterator[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self._get_bucket_index(lowerCamelCase__ ) for _ in range(len(self._buckets ) ): yield ind SCREAMING_SNAKE_CASE = self._get_next_ind(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : KEY ,lowerCamelCase__ : VAL ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCamelCase__ ): if self._try_set(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ): break def __setitem__( self : Optional[Any] ,lowerCamelCase__ : KEY ,lowerCamelCase__ : VAL ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(lowerCamelCase__ ,lowerCamelCase__ ) def __delitem__( self : Any ,lowerCamelCase__ : KEY ) -> None: '''simple docstring''' for ind in 
self._iterate_buckets(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = self._buckets[ind] if item is None: raise KeyError(lowerCamelCase__ ) if item is _deleted: continue if item.key == key: SCREAMING_SNAKE_CASE = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Dict ,lowerCamelCase__ : KEY ) -> VAL: '''simple docstring''' for ind in self._iterate_buckets(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCamelCase__ ) def __len__( self : Union[str, Any] ) -> int: '''simple docstring''' return self._len def __iter__( self : Tuple ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self : List[str] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """ ,""".join( F"""{item.key}: {item.val}""" for item in self._buckets if item ) return F"""HashMap({val_string})"""
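# A usage sketch for the open-addressing map above, assuming the class is exposed under
# its original name HashMap (positional arguments: initial block size, capacity factor).
hashmap = HashMap(8, 0.75)
for i in range(20):              # enough insertions to trigger several resizes
    hashmap[f"key-{i}"] = i
del hashmap["key-3"]             # replaced by the _deleted sentinel, not removed in place
assert "key-3" not in hashmap and hashmap["key-7"] == 7 and len(hashmap) == 19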
296
from argparse import ArgumentParser from .env import EnvironmentCommand def __lowercase ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" ) SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""diffusers-cli command helpers""" ) # Register commands EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) # Let's go SCREAMING_SNAKE_CASE = parser.parse_args() if not hasattr(_SCREAMING_SNAKE_CASE , """func""" ): parser.print_help() exit(1 ) # Run SCREAMING_SNAKE_CASE = args.func(_SCREAMING_SNAKE_CASE ) service.run() if __name__ == "__main__": main()
296
1
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> list[list]: '''simple docstring''' SCREAMING_SNAKE_CASE = current_set.copy() for row_index, row in enumerate(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE = row[0] for column_index, column in enumerate(_SCREAMING_SNAKE_CASE ): if magnitude == 0: SCREAMING_SNAKE_CASE = column continue SCREAMING_SNAKE_CASE = column / magnitude # Subtract to cancel term SCREAMING_SNAKE_CASE = current_set[0] SCREAMING_SNAKE_CASE = [first_row] SCREAMING_SNAKE_CASE = current_set[1::] for row in current_set: SCREAMING_SNAKE_CASE = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(_SCREAMING_SNAKE_CASE ) continue for column_index in range(len(_SCREAMING_SNAKE_CASE ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(_SCREAMING_SNAKE_CASE ) # Create next recursion iteration set if len(final_set[0] ) != 3: SCREAMING_SNAKE_CASE = final_set[0] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) SCREAMING_SNAKE_CASE = simplify(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = resultant return final_set def __lowercase ( _SCREAMING_SNAKE_CASE ) -> list: '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) + 1 if any(len(_SCREAMING_SNAKE_CASE ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(_SCREAMING_SNAKE_CASE ) == 1: return [equations[0][-1] / equations[0][0]] SCREAMING_SNAKE_CASE = equations.copy() if any(0 in row for row in data_set ): SCREAMING_SNAKE_CASE = data_set.copy() SCREAMING_SNAKE_CASE = [] for row_index, row in enumerate(_SCREAMING_SNAKE_CASE ): if 0 not in row: SCREAMING_SNAKE_CASE = data_set.pop(_SCREAMING_SNAKE_CASE ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = data_set.copy() SCREAMING_SNAKE_CASE = simplify(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = simplified[::-1] SCREAMING_SNAKE_CASE = [] for row in simplified: SCREAMING_SNAKE_CASE = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue SCREAMING_SNAKE_CASE = row.copy()[: len(_SCREAMING_SNAKE_CASE ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(_SCREAMING_SNAKE_CASE ) == 0: solutions.append(0 ) continue SCREAMING_SNAKE_CASE = temp_row[1::] SCREAMING_SNAKE_CASE = temp_row[::-1] for column_index, column in enumerate(_SCREAMING_SNAKE_CASE ): current_solution -= column * solutions[column_index] solutions.append(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = [] for item in solutions: final.append(float(round(_SCREAMING_SNAKE_CASE , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
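# A cross-check sketch: the Gaussian-elimination solver above can be validated against
# numpy.linalg.solve by splitting each row into its coefficients and constant term.
import numpy as np

eq = [[2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8]]
coefficients = np.array([row[:-1] for row in eq], dtype=float)
constants = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(coefficients, constants))  # should agree with solve_simultaneous(eq)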
296
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = SwinConfig() SCREAMING_SNAKE_CASE = swin_name.split("""_""" ) SCREAMING_SNAKE_CASE = name_split[1] SCREAMING_SNAKE_CASE = int(name_split[4] ) SCREAMING_SNAKE_CASE = int(name_split[3][-1] ) if model_size == "tiny": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 6, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "small": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "base": SCREAMING_SNAKE_CASE = 1_28 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (4, 8, 16, 32) else: SCREAMING_SNAKE_CASE = 1_92 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (6, 12, 24, 48) if "in22k" in swin_name: SCREAMING_SNAKE_CASE = 2_18_41 else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = img_size SCREAMING_SNAKE_CASE = num_classes SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = window_size return config def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: SCREAMING_SNAKE_CASE = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "norm.bias": SCREAMING_SNAKE_CASE = """layernorm.bias""" if "head" in name: SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" ) else: SCREAMING_SNAKE_CASE = """swin.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[1] ) SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] 
SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[ :dim ] SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE = val[ -dim: ] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
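# Usage sketch (illustrative; the script filename is an assumption): the argparse block
# above exposes the timm checkpoint name and an output directory, e.g.
#
#     python convert_swin_timm_to_pytorch.py \
#         --swin_name swin_base_patch4_window7_224 \
#         --pytorch_dump_folder_path ./swin-base-patch4-window7-224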
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Binary search: smallest index in v[left+1 .. right] whose value is >= key."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element so the tails stay as small as possible
            # (this is what the binary-search helper above exists for).
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
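# Quick illustrative check (not part of the original module): the longest strictly
# increasing subsequence of the list below is 2, 3, 7, 8, 10, 13, so the length is 6.
#
#     >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
#     6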
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among the given environment variables, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
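# Example (illustrative; the environment-variable names are placeholders):
#
#     os.environ["USE_FANCY_MODE"] = "true"
#     parse_flag_from_env("USE_FANCY_MODE")                          # -> True
#     get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1)    # -> 1 if neither is set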
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_ = { """configuration_bigbird_pegasus""": [ """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BigBirdPegasusConfig""", """BigBirdPegasusOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""", """BigBirdPegasusForCausalLM""", """BigBirdPegasusForConditionalGeneration""", """BigBirdPegasusForQuestionAnswering""", """BigBirdPegasusForSequenceClassification""", """BigBirdPegasusModel""", """BigBirdPegasusPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __snake_case : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __snake_case : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Use FP16 to accelerate inference."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark training of model"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Verbose memory tracing"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Trace memory line by line"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save result to a CSV file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save all print statements in a log file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to print environment information"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __snake_case : str = field( default=F"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) __snake_case : str = field( default=F"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __snake_case : str = field( default=F"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __snake_case : str = field( default=F"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __snake_case : str = field( default=F"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) __snake_case : str = field( default=F"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) __snake_case : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" ,lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[int] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ,lowerCamelCase__ : Any=1024 ,lowerCamelCase__ : Optional[Any]=1024 ,lowerCamelCase__ : int=3.6 ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = tokenizer SCREAMING_SNAKE_CASE = tokenizer.bos_token_id SCREAMING_SNAKE_CASE = dataset SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = seq_length * chars_per_token * num_of_sequences def __iter__( self : List[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = iter(self.dataset ) SCREAMING_SNAKE_CASE = True while more_examples: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowerCamelCase__ )["""content"""] ) buffer_len += len(buffer[-1] ) except StopIteration: SCREAMING_SNAKE_CASE = False break SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase__ ,truncation=lowerCamelCase__ )["""input_ids"""] SCREAMING_SNAKE_CASE = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 ,len(lowerCamelCase__ ) ,self.seq_length ): SCREAMING_SNAKE_CASE = all_token_ids[i : i + self.seq_length] if len(lowerCamelCase__ ) == self.seq_length: yield torch.tensor(lowerCamelCase__ ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = {"""streaming""": True} SCREAMING_SNAKE_CASE = load_dataset(args.dataset_name , split="""train""" , **_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = ConstantLengthDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , seq_length=args.seq_length ) SCREAMING_SNAKE_CASE = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) return eval_dataloader def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' model.eval() SCREAMING_SNAKE_CASE = [] for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(_SCREAMING_SNAKE_CASE ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break SCREAMING_SNAKE_CASE = torch.mean(torch.cat(_SCREAMING_SNAKE_CASE ) ) try: SCREAMING_SNAKE_CASE = torch.exp(_SCREAMING_SNAKE_CASE ) except OverflowError: SCREAMING_SNAKE_CASE = float("""inf""" ) return loss.item(), perplexity.item() # Setup Accelerator SCREAMING_SNAKE_CASE_ = Accelerator() # Parse configuration SCREAMING_SNAKE_CASE_ = HfArgumentParser(EvaluationArguments) SCREAMING_SNAKE_CASE_ = parser.parse_args() set_seed(args.seed) # Logging SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader SCREAMING_SNAKE_CASE_ = create_dataloader(args) # Prepare everything with our 
`accelerator`. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("""Evaluating and saving model after training""") SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = evaluate(args) logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
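# Illustrative use outside the test class: the predicate can enumerate small primes directly.
#
#     >>> [n for n in range(2, 30) if is_prime(n)]
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]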
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
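# Example (illustrative): the returned object is a plain torch layer and can be called directly.
#
#     >>> import torch
#     >>> get_activation("gelu")(torch.zeros(2))
#     tensor([0., 0.])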
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` into a list of cipher values and the matching random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plain text from the cipher values and the key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
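# Round-trip sanity check (illustrative): decrypt(encrypt(x)) recovers x because
# ((i + k) * k - k**2) / k == i for every key value k.
#
#     >>> c, k = Onepad.encrypt("attack at dawn")
#     >>> Onepad.decrypt(c, k)
#     'attack at dawn'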
# Lint as: python3 import itertools import os import re SCREAMING_SNAKE_CASE_ = re.compile(r"""([A-Z]+)([A-Z][a-z])""") SCREAMING_SNAKE_CASE_ = re.compile(r"""([a-z\d])([A-Z])""") SCREAMING_SNAKE_CASE_ = re.compile(r"""(?<!_)_(?!_)""") SCREAMING_SNAKE_CASE_ = re.compile(r"""(_{2,})""") SCREAMING_SNAKE_CASE_ = r"""^\w+(\.\w+)*$""" SCREAMING_SNAKE_CASE_ = r"""<>:/\|?*""" def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = _uppercase_uppercase_re.sub(r"""\1_\2""" , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = _lowercase_uppercase_re.sub(r"""\1_\2""" , _SCREAMING_SNAKE_CASE ) return name.lower() def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = _single_underscore_re.split(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = [_multiple_underscores_re.split(_SCREAMING_SNAKE_CASE ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) if n != """""" ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' if os.path.basename(_SCREAMING_SNAKE_CASE ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(_SCREAMING_SNAKE_CASE ) def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' if os.path.basename(_SCREAMING_SNAKE_CASE ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , _SCREAMING_SNAKE_CASE ): raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" ) return F"""{filename_prefix_for_name(_SCREAMING_SNAKE_CASE )}-{split}""" def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = filename_prefix_for_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if filetype_suffix: prefix += F""".{filetype_suffix}""" SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return F"""{filepath}*""" def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = filename_prefix_for_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if shard_lengths: SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(_SCREAMING_SNAKE_CASE )] if filetype_suffix: SCREAMING_SNAKE_CASE = [filename + F""".{filetype_suffix}""" for filename in filenames] return filenames else: SCREAMING_SNAKE_CASE = prefix if filetype_suffix: filename += F""".{filetype_suffix}""" return [filename]
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = "efficientformer" def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = mlp_expansion_ratio SCREAMING_SNAKE_CASE = downsamples SCREAMING_SNAKE_CASE = dim SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = resolution SCREAMING_SNAKE_CASE = pool_size SCREAMING_SNAKE_CASE = downsample_patch_size SCREAMING_SNAKE_CASE = downsample_stride SCREAMING_SNAKE_CASE = downsample_pad SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = num_metaad_blocks SCREAMING_SNAKE_CASE = distillation SCREAMING_SNAKE_CASE = use_layer_scale SCREAMING_SNAKE_CASE = layer_scale_init_value SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = batch_norm_eps
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel SCREAMING_SNAKE_CASE_ = HfApi() SCREAMING_SNAKE_CASE_ = {} # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 
0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) SCREAMING_SNAKE_CASE_ = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on SCREAMING_SNAKE_CASE_ = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": SCREAMING_SNAKE_CASE_ = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) SCREAMING_SNAKE_CASE_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) SCREAMING_SNAKE_CASE_ = torch.tensor([1_0] * noise.shape[0]) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :3_0], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' if "://" in dataset_path: SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1] return dataset_path def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool: '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) ) else: fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE ) def __lowercase ( ) -> None: '''simple docstring''' if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = threading.Lock()
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.dummy_uncond_unet SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0] SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256""" SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Any = ["pixel_values"] def __init__( self : Union[str, Any] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : bool = True ,**lowerCamelCase__ : int ,) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 224} SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ ,param_name="""crop_size""" ) SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = resample SCREAMING_SNAKE_CASE = do_center_crop SCREAMING_SNAKE_CASE = crop_size SCREAMING_SNAKE_CASE = do_rescale SCREAMING_SNAKE_CASE = rescale_factor SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE = do_convert_rgb def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Tuple ,) -> np.ndarray: '''simple docstring''' SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) SCREAMING_SNAKE_CASE = get_resize_output_image_size(lowerCamelCase__ ,size=size["""shortest_edge"""] ,default_to_square=lowerCamelCase__ ) return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,) -> np.ndarray: '''simple docstring''' SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(lowerCamelCase__ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Any ,) -> Optional[Any]: '''simple docstring''' return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Tuple ,) -> np.ndarray: '''simple docstring''' return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : int = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase__ : List[str] ,) -> PIL.Image.Image: '''simple docstring''' SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE = size if size is not None else self.size SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,param_name="""size""" ,default_to_square=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,param_name="""crop_size""" ,default_to_square=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE = [convert_to_rgb(lowerCamelCase__ ) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE = [self.center_crop(image=lowerCamelCase__ ,size=lowerCamelCase__ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images] SCREAMING_SNAKE_CASE = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images] SCREAMING_SNAKE_CASE = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
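# Example (illustrative; assumes the class keeps its upstream name, CLIPImageProcessor):
# with the defaults above (resize shortest edge to 224, center crop 224x224, rescale,
# normalize), a single PIL image comes back as one channel-first array.
#
#     >>> from PIL import Image
#     >>> processor = CLIPImageProcessor()
#     >>> out = processor(images=Image.new("RGB", (256, 192)))
#     >>> out["pixel_values"][0].shape
#     (3, 224, 224)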
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]: '''simple docstring''' super().__init__( features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = Generator( cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,) SCREAMING_SNAKE_CASE = self.builder.as_dataset( split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory ) return dataset
from ..utils import DummyObject, requires_backends class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[str] = ["note_seq"] def __init__( self : Dict ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' requires_backends(self ,["""note_seq"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Dict ) -> Optional[Any]: '''simple docstring''' requires_backends(cls ,["""note_seq"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : str ) -> Optional[Any]: '''simple docstring''' requires_backends(cls ,["""note_seq"""] )
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) __snake_case : Optional[str] = field( default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of `pattern`, the length of its longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
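# Worked example (illustrative): for the pattern "ababcab" the failure array is
# [0, 0, 1, 2, 0, 1, 2] -- e.g. the prefix "ababc" shares no proper prefix/suffix,
# while the full pattern ends with "ab", a prefix of length 2.
#
#     >>> get_failure_array("ababcab")
#     [0, 0, 1, 2, 0, 1, 2]
#     >>> kmp("ababcab", "xxababcabyy")
#     True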
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[str] = TextToVideoSDPipeline __snake_case : int = TEXT_TO_IMAGE_PARAMS __snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __snake_case : Dict = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,) SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]: '''simple docstring''' if str(lowerCamelCase__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) 
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """np""" SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of an ASCII string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
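# Cross-check (illustrative): for ASCII input the result should agree with the
# C implementation shipped in the standard library.
#
#     >>> import zlib
#     >>> adler32("Wikipedia") == zlib.adler32(b"Wikipedia")
#     True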
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
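# Sanity check (illustrative): only the last `n` digits are needed, so the three-argument
# pow keeps everything reduced modulo 10**n and never materialises the roughly
# 2.4-million-digit number itself.
#
#     >>> len(solution(10))
#     10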
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[int] = "xlm-roberta" def __init__( self : Any ,lowerCamelCase__ : Optional[int]=30522 ,lowerCamelCase__ : Optional[Any]=768 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Any=12 ,lowerCamelCase__ : Optional[Any]=3072 ,lowerCamelCase__ : Optional[Any]="gelu" ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : List[str]=512 ,lowerCamelCase__ : Any=2 ,lowerCamelCase__ : str=0.02 ,lowerCamelCase__ : List[Any]=1e-1_2 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : int="absolute" ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : List[str]=None ,**lowerCamelCase__ : Union[str, Any] ,) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
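# Example (illustrative; assumes the upstream class name, XLMRobertaConfig): the defaults
# above reproduce the base architecture.
#
#     >>> config = XLMRobertaConfig()
#     >>> config.hidden_size, config.num_hidden_layers, config.num_attention_heads
#     (768, 12, 12)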
296
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
296
1
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
296
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
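fire.Fire(minify) exposes the function as a command-line entry point, but it can also be called directly. Below is a minimal sketch of exercising it on a throwaway directory, assuming the restored minify above is in scope; the file name and contents are made up for illustration.

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dst:
    # One input file with five lines.
    (Path(src) / "sample.txt").write_text("\n".join(f"line {i}" for i in range(5)))

    # Keep only the first two lines of every file in src.
    minify(src, dst, n=2)

    print((Path(dst) / "sample.txt").read_text())  # "line 0\nline 1"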
296
1
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : str = OpenAIGPTTokenizer __snake_case : List[str] = OpenAIGPTTokenizerFast __snake_case : Tuple = True __snake_case : int = False def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) ) SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""] SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) ) with open(self.merges_file ,"""w""" ) as fp: fp.write("""\n""".join(lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Dict ) -> Optional[int]: '''simple docstring''' return "lower newer", "lower newer" def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file ) SCREAMING_SNAKE_CASE = """lower""" SCREAMING_SNAKE_CASE = ["""low""", """er</w>"""] SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokens + ["""<unk>"""] SCREAMING_SNAKE_CASE = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : List[Any]=15 ) -> Tuple: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ ) # Simple input SCREAMING_SNAKE_CASE = """This is a simple input""" SCREAMING_SNAKE_CASE = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(lowerCamelCase__ ,tokenizer_r.encode ,lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding="""max_length""" ) # Simple input self.assertRaises(lowerCamelCase__ ,tokenizer_r.encode_plus ,lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding="""max_length""" ) # Simple input self.assertRaises( lowerCamelCase__ ,tokenizer_r.batch_encode_plus ,lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding="""max_length""" ,) # Pair input self.assertRaises(lowerCamelCase__ ,tokenizer_r.encode ,lowerCamelCase__ 
,max_length=lowerCamelCase__ ,padding="""max_length""" ) # Pair input self.assertRaises(lowerCamelCase__ ,tokenizer_r.encode_plus ,lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding="""max_length""" ) # Pair input self.assertRaises( lowerCamelCase__ ,tokenizer_r.batch_encode_plus ,lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding="""max_length""" ,) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' pass @require_ftfy @require_spacy @require_tokenizers class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' pass
296
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
296
1
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
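These wrappers look like torch.hub-style entry points; in any case they can be called directly once the module is on the path. A hedged usage sketch: the checkpoint name is only an example, the entry-point names are the ones restored above, and the calls require network access to the Hugging Face Hub.

cfg = config("bert-base-uncased")
tok = tokenizer("bert-base-uncased")

print(cfg.hidden_size)  # 768 for this checkpoint
print(tok.vocab_size)   # 30522 for this checkpoint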
296
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git_vision_model" def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git" def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]: '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = num_image_with_embedding SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
296
1
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
296
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = spectrogram_length SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = num_audio_channels SCREAMING_SNAKE_CASE = hop_length SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = sampling_rate def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str: '''simple docstring''' def _flatten(lowerCamelCase__ : List[Any] ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[Any] = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) ) 
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE = feature_extractor( lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = TvltFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
296
1
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[str] = "cvt" def __init__( self : int ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : int=[7, 3, 3] ,lowerCamelCase__ : Tuple=[4, 2, 2] ,lowerCamelCase__ : Union[str, Any]=[2, 1, 1] ,lowerCamelCase__ : Dict=[64, 192, 384] ,lowerCamelCase__ : str=[1, 3, 6] ,lowerCamelCase__ : Optional[int]=[1, 2, 10] ,lowerCamelCase__ : Union[str, Any]=[4.0, 4.0, 4.0] ,lowerCamelCase__ : str=[0.0, 0.0, 0.0] ,lowerCamelCase__ : List[str]=[0.0, 0.0, 0.0] ,lowerCamelCase__ : Dict=[0.0, 0.0, 0.1] ,lowerCamelCase__ : str=[True, True, True] ,lowerCamelCase__ : str=[False, False, True] ,lowerCamelCase__ : Tuple=["dw_bn", "dw_bn", "dw_bn"] ,lowerCamelCase__ : Optional[Any]=[3, 3, 3] ,lowerCamelCase__ : Union[str, Any]=[1, 1, 1] ,lowerCamelCase__ : List[str]=[2, 2, 2] ,lowerCamelCase__ : List[Any]=[1, 1, 1] ,lowerCamelCase__ : List[str]=[1, 1, 1] ,lowerCamelCase__ : Dict=0.02 ,lowerCamelCase__ : int=1e-1_2 ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_sizes SCREAMING_SNAKE_CASE = patch_stride SCREAMING_SNAKE_CASE = patch_padding SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = depth SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = attention_drop_rate SCREAMING_SNAKE_CASE = drop_rate SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = qkv_bias SCREAMING_SNAKE_CASE = cls_token SCREAMING_SNAKE_CASE = qkv_projection_method SCREAMING_SNAKE_CASE = kernel_qkv SCREAMING_SNAKE_CASE = padding_kv SCREAMING_SNAKE_CASE = stride_kv SCREAMING_SNAKE_CASE = padding_q SCREAMING_SNAKE_CASE = stride_q SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps
296
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 
7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
296
1
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase__ : '''simple docstring''' def __init__( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int]=13 ,lowerCamelCase__ : Any=7 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : str=99 ,lowerCamelCase__ : Dict=64 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Any=4 ,lowerCamelCase__ : List[Any]=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : List[str]=512 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : Optional[int]=0.02 ,lowerCamelCase__ : Any=3 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : List[str]=None ,) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = vocab_size - 1 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,) def SCREAMING_SNAKE_CASE__ ( 
self : Union[str, Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = True return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = GPTNeoXModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = GPTNeoXModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = GPTNeoXForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = GPTNeoXForQuestionAnswering(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = GPTNeoXForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = GPTNeoXForTokenClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( 
self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = GPTNeoXForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # first forward pass SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,use_cache=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) ,config.vocab_size ) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] ,dim=-1 ) SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] ,dim=-1 ) SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = output_from_no_past["""hidden_states"""][0] SCREAMING_SNAKE_CASE = model( lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,past_key_values=lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,)["""hidden_states"""][0] # select random slice SCREAMING_SNAKE_CASE = ids_tensor((1,) ,output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Tuple = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) __snake_case : Optional[int] = (GPTNeoXForCausalLM,) if is_torch_available() else () __snake_case : Tuple = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) __snake_case : List[str] = False __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = GPTNeoXModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=64 ,num_attention_heads=8 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase__ 
,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = ids_tensor([1, 10] ,config.vocab_size ) SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = GPTNeoXModel(lowerCamelCase__ ) original_model.to(lowerCamelCase__ ) original_model.eval() SCREAMING_SNAKE_CASE = original_model(lowerCamelCase__ ).last_hidden_state SCREAMING_SNAKE_CASE = original_model(lowerCamelCase__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = {"""type""": scaling_type, """factor""": 10.0} SCREAMING_SNAKE_CASE = GPTNeoXModel(lowerCamelCase__ ) scaled_model.to(lowerCamelCase__ ) scaled_model.eval() SCREAMING_SNAKE_CASE = scaled_model(lowerCamelCase__ ).last_hidden_state SCREAMING_SNAKE_CASE = scaled_model(lowerCamelCase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-5 ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: SCREAMING_SNAKE_CASE = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer("""My favorite food is""" ,return_tensors="""pt""" ).to(lowerCamelCase__ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 SCREAMING_SNAKE_CASE = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" SCREAMING_SNAKE_CASE = model.generate(**lowerCamelCase__ ,do_sample=lowerCamelCase__ ,max_new_tokens=20 ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase__ )[0] self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence 
language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset SCREAMING_SNAKE_CASE_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class UpperCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple ,lowerCamelCase__ : List[Any] ) -> Optional[int]: '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE = torchvision.models.resnetaaa(pretrained=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = list(model.children() )[:-2] SCREAMING_SNAKE_CASE = nn.Sequential(*lowerCamelCase__ ) SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.pool(self.model(lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = torch.flatten(lowerCamelCase__ ,start_dim=2 ) SCREAMING_SNAKE_CASE = out.transpose(1 ,2 ).contiguous() return out # BxNx2048 class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = [json.loads(lowerCamelCase__ ) for l in open(lowerCamelCase__ )] SCREAMING_SNAKE_CASE = os.path.dirname(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = tokenizer SCREAMING_SNAKE_CASE = labels SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = transforms def __len__( self : Any ) -> Optional[int]: '''simple docstring''' return len(self.data ) def __getitem__( self : List[Any] ,lowerCamelCase__ : Dict ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = sentence[0], sentence[1:-1], sentence[-1] SCREAMING_SNAKE_CASE = sentence[: self.max_seq_length] SCREAMING_SNAKE_CASE = torch.zeros(self.n_classes ) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" ) SCREAMING_SNAKE_CASE = self.transforms(lowerCamelCase__ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = [len(row["""sentence"""] ) for row in batch] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long ) SCREAMING_SNAKE_CASE = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ): SCREAMING_SNAKE_CASE = input_row["""sentence"""] SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = torch.stack([row["""image"""] for row in batch] ) SCREAMING_SNAKE_CASE = torch.stack([row["""label"""] for row in batch] ) SCREAMING_SNAKE_CASE = 
torch.stack([row["""image_start_token"""] for row in batch] ) SCREAMING_SNAKE_CASE = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def __lowercase ( ) -> List[Any]: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def __lowercase ( ) -> List[str]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "table-transformer" __snake_case : Union[str, Any] = ["past_key_values"] __snake_case : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) # set timm attributes to None SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.d_model class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' return 12
import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : int = RoFormerTokenizer __snake_case : Optional[Any] = RoFormerTokenizerFast __snake_case : Any = True __snake_case : int = True def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: '''simple docstring''' super().setUp() def SCREAMING_SNAKE_CASE__ ( self : str ,**lowerCamelCase__ : Dict ) -> Tuple: '''simple docstring''' return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,**lowerCamelCase__ : Dict ) -> List[str]: '''simple docstring''' return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = """永和服装饰品有限公司,今天天气非常好""" SCREAMING_SNAKE_CASE = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好""" return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.get_chinese_input_output_texts() SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,output_text.split() ) SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.get_chinese_input_output_texts() SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,output_text.split() ) SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' pass
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    # For every perimeter up to `limit`, count how many right triangles with
    # integral sides have that perimeter, generating primitive Pythagorean
    # triples via Euclid's formula; return how many perimeters admit exactly one.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE_ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase__ ( datasets.BuilderConfig ): '''simple docstring''' __snake_case : int = 10000 __snake_case : Optional[List[str]] = None __snake_case : Optional[datasets.Features] = None class UpperCamelCase__ ( datasets.ArrowBasedBuilder ): '''simple docstring''' __snake_case : List[Any] = ParquetConfig def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Any ) -> int: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCamelCase__ ,(str, list, tuple) ): SCREAMING_SNAKE_CASE = data_files if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCamelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )] SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCamelCase__ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(lowerCamelCase__ ): with open(lowerCamelCase__ ,"""rb""" ) as f: SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(lowerCamelCase__ ) ) break splits.append(datasets.SplitGenerator(name=lowerCamelCase__ ,gen_kwargs={"""files""": files} ) ) return splits def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : pa.Table ) -> pa.Table: '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE = table_cast(lowerCamelCase__ ,self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : List[str] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase__ ) ): with open(lowerCamelCase__ ,"""rb""" ) as f: SCREAMING_SNAKE_CASE = pq.ParquetFile(lowerCamelCase__ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ): SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: 
{pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F"""{file_idx}_{batch_idx}""", self._cast_table(lowerCamelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCamelCase__ )}: {e}""" ) raise
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = SwinConfig() SCREAMING_SNAKE_CASE = swin_name.split("""_""" ) SCREAMING_SNAKE_CASE = name_split[1] SCREAMING_SNAKE_CASE = int(name_split[4] ) SCREAMING_SNAKE_CASE = int(name_split[3][-1] ) if model_size == "tiny": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 6, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "small": SCREAMING_SNAKE_CASE = 96 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (3, 6, 12, 24) elif model_size == "base": SCREAMING_SNAKE_CASE = 1_28 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (4, 8, 16, 32) else: SCREAMING_SNAKE_CASE = 1_92 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (6, 12, 24, 48) if "in22k" in swin_name: SCREAMING_SNAKE_CASE = 2_18_41 else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = img_size SCREAMING_SNAKE_CASE = num_classes SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = window_size return config def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: SCREAMING_SNAKE_CASE = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "norm.bias": SCREAMING_SNAKE_CASE = """layernorm.bias""" if "head" in name: SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" ) else: SCREAMING_SNAKE_CASE = """swin.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[1] ) SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] 
SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[ :dim ] SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE = val[ -dim: ] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    # Return the first non-negative integer found among the given environment
    # variables, falling back to `default` if none is set.
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def __lowercase ( _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=1_00 , _SCREAMING_SNAKE_CASE=10_26 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , _SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , ) -> int: '''simple docstring''' set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = generate_datasets( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , number=_SCREAMING_SNAKE_CASE , min_len=10_26 , trim=_SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model SCREAMING_SNAKE_CASE = load_gpta("""gpt2""" ).to(_SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) SCREAMING_SNAKE_CASE = compute_perplexity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , _SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=15 , _SCREAMING_SNAKE_CASE=1_28 , _SCREAMING_SNAKE_CASE=1_00 , _SCREAMING_SNAKE_CASE="igf_model.pt" , ) -> Dict: '''simple docstring''' set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE = SecondaryLearner(_SCREAMING_SNAKE_CASE ) # Train secondary learner SCREAMING_SNAKE_CASE = train_secondary_learner( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_epochs=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , eval_freq=1_00 , igf_model_path=_SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=10_00 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=recopy_gpta , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) SCREAMING_SNAKE_CASE = RandomSampler(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = max_steps // (len(_SCREAMING_SNAKE_CASE )) + 1 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = torch.zeros((1, context_len) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = 
recopy_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(_SCREAMING_SNAKE_CASE ) secondary_learner.eval() SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE = compute_perplexity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) test_perps.append(_SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , _SCREAMING_SNAKE_CASE , """:""" , _SCREAMING_SNAKE_CASE ) for epoch in range(int(_SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(_SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = True if secondary_learner is not None: SCREAMING_SNAKE_CASE = secondary_learner.forward( torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE = compute_perplexity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) test_perps.append(_SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , _SCREAMING_SNAKE_CASE , """:""" , _SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def __lowercase ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The input data dir. 
Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=_SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=1_00 , type=_SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=1_00 , type=_SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=10_00 , type=_SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=1_28 , type=_SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=_SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=_SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=1_00 , type=_SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=10_26 , type=_SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=_SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=_SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=_SCREAMING_SNAKE_CASE , 
help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner SCREAMING_SNAKE_CASE = training_secondary_learner( _SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_00 , min_len=10_26 , trim=_SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_SCREAMING_SNAKE_CASE , secondary_learner=_SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __snake_case : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __snake_case : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Use FP16 to accelerate inference."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark training of model"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Verbose memory tracing"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Trace memory line by line"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save result to a CSV file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save all print statements in a log file"} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to print environment information"} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __snake_case : str = field( default=F"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) __snake_case : str = field( default=F"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __snake_case : str = field( default=F"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __snake_case : str = field( default=F"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __snake_case : str = field( default=F"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) __snake_case : str = field( default=F"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) __snake_case : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" ,lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10 ** n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
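# The solution above hinges on three-argument pow(), which performs modular
# exponentiation without ever materializing 2**7830457. A quick sanity sketch:
assert pow(2, 10, 1000) == (2 ** 10) % 1000 == 24

n = 10
modulus = 10 ** n
last_digits = (28433 * pow(2, 7830457, modulus) + 1) % modulus
print(last_digits)  # the last 10 digits of 28433 * 2**7830457 + 1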
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using the 6k +/- 1 trial-division optimization."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
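# Why the loop above steps by 6: every prime > 3 has the form 6k +/- 1, so
# trial division only needs 2, 3, and the 6k +/- 1 candidates up to sqrt(n).
# A standalone sketch of the candidate set:
import math


def trial_divisors(number: int):
    yield 2
    yield 3
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        yield i        # 6k - 1
        yield i + 2    # 6k + 1


print(list(trial_divisors(97)))                      # [2, 3, 5, 7]
print(all(97 % d != 0 for d in trial_divisors(97)))  # True -> 97 is prime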
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for ``year`` with Gauss's Easter algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4

    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
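# Sanity check against a known date (Western Easter 2023 fell on April 9);
# this assumes the `gauss_easter` function defined above:
from datetime import datetime

assert gauss_easter(2023) == datetime(2023, 4, 9)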
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` into a cipher/key pair: c = (p + k) * k for a random k."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert ``encrypt``: p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
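# Arithmetic behind the round trip above: encryption maps each code point p
# to c = (p + k) * k, and decryption recovers p = (c - k**2) // k.
p, k = ord("H"), 42
c = (p + k) * k                # 4788
assert (c - k ** 2) // k == p  # exact, since c - k**2 == p * k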
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number * number`` ends with the digits of ``number``."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
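# The loop above compares `number` digit-by-digit with the tail of its
# square, i.e. it recognizes automorphic numbers. Equivalent string check:
for n in (0, 1, 5, 6, 25, 76, 376, 625):
    assert str(n * n).endswith(str(n))       # automorphic
for n in (2, 3, 7, 10):
    assert not str(n * n).endswith(str(n))   # not automorphic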
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = "efficientformer" def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = mlp_expansion_ratio SCREAMING_SNAKE_CASE = downsamples SCREAMING_SNAKE_CASE = dim SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = resolution SCREAMING_SNAKE_CASE = pool_size SCREAMING_SNAKE_CASE = downsample_patch_size SCREAMING_SNAKE_CASE = downsample_stride SCREAMING_SNAKE_CASE = downsample_pad SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = num_metaad_blocks SCREAMING_SNAKE_CASE = distillation SCREAMING_SNAKE_CASE = use_layer_scale SCREAMING_SNAKE_CASE = layer_scale_init_value SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = batch_norm_eps
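# Configs like the one above are plain serializable containers. A minimal
# usage sketch, assuming the class is exported from `transformers` as
# `EfficientFormerConfig`:
from transformers import EfficientFormerConfig

config = EfficientFormerConfig()                     # library defaults
custom = EfficientFormerConfig(depths=[2, 2, 4, 3])  # override a single field
print(custom.depths)
print(config.to_json_string()[:80])                  # configs round-trip via JSON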
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } SCREAMING_SNAKE_CASE_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for attribute in key.split(""".""" ): SCREAMING_SNAKE_CASE = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: SCREAMING_SNAKE_CASE = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: SCREAMING_SNAKE_CASE = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": SCREAMING_SNAKE_CASE = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE = value elif weight_type == "bias": SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = fairseq_model.state_dict() SCREAMING_SNAKE_CASE = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) SCREAMING_SNAKE_CASE = True else: for key, mapped_key in MAPPING.items(): SCREAMING_SNAKE_CASE = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue SCREAMING_SNAKE_CASE = True if "*" in mapped_key: SCREAMING_SNAKE_CASE = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: SCREAMING_SNAKE_CASE = """weight_g""" elif "weight_v" in name: SCREAMING_SNAKE_CASE = """weight_v""" elif "bias" in name: SCREAMING_SNAKE_CASE = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj SCREAMING_SNAKE_CASE = """weight""" else: SCREAMING_SNAKE_CASE = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1] SCREAMING_SNAKE_CASE = name.split(""".""" ) SCREAMING_SNAKE_CASE = int(items[0] ) SCREAMING_SNAKE_CASE = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) SCREAMING_SNAKE_CASE = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) SCREAMING_SNAKE_CASE = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) SCREAMING_SNAKE_CASE = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" 
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) SCREAMING_SNAKE_CASE = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True ) -> List[Any]: '''simple docstring''' if config_path is not None: SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE = UniSpeechSatConfig() SCREAMING_SNAKE_CASE = """""" if is_finetuned: SCREAMING_SNAKE_CASE = UniSpeechSatForCTC(_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE = UniSpeechSatForPreTraining(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) SCREAMING_SNAKE_CASE = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Any protocol other than the local ``file`` protocol counts as remote."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references and lock, e.g. after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
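# Quick check of the path helpers above (no remote filesystem required),
# assuming this module is importable:
print(extract_path_from_uri("s3://bucket/train"))  # bucket/train
print(extract_path_from_uri("relative/path"))      # relative/path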
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in ``qs`` match a contiguous window of ``ks``."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
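# Quick check of the windowed matcher defined above: the query tuple must
# match a contiguous run of key components, fully anchored by the "$" suffix.
print(_match(("attention", "out_proj", "kernel"),
             ("transformer", "h", "0", "attention", "out_proj", "kernel")))  # True
print(_match(("mlp", "c_fc", "bias"), ("mlp", "c_proj", "bias")))            # False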
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.dummy_uncond_unet SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0] SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256""" SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = KarrasVeScheduler() SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' random.seed(_SCREAMING_SNAKE_CASE ) np.random.seed(_SCREAMING_SNAKE_CASE ) torch.manual_seed(_SCREAMING_SNAKE_CASE ) torch.cuda.manual_seed_all(_SCREAMING_SNAKE_CASE ) # ^^ safe to call this function even if cuda is not available class UpperCamelCase__ : '''simple docstring''' def __init__( self : int ,lowerCamelCase__ : Iterable[torch.nn.Parameter] ,lowerCamelCase__ : float = 0.9999 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 0 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Union[float, int] = 1.0 ,lowerCamelCase__ : Union[float, int] = 2 / 3 ,lowerCamelCase__ : Optional[Any] = None ,lowerCamelCase__ : Dict[str, Any] = None ,**lowerCamelCase__ : str ,) -> List[Any]: '''simple docstring''' if isinstance(lowerCamelCase__ ,torch.nn.Module ): SCREAMING_SNAKE_CASE = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility SCREAMING_SNAKE_CASE = True if kwargs.get("""max_value""" ,lowerCamelCase__ ) is not None: SCREAMING_SNAKE_CASE = """The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = kwargs["""max_value"""] if kwargs.get("""min_value""" ,lowerCamelCase__ ) is not None: SCREAMING_SNAKE_CASE = """The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = kwargs["""min_value"""] SCREAMING_SNAKE_CASE = list(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [p.clone().detach() for p in parameters] if kwargs.get("""device""" ,lowerCamelCase__ ) is not None: SCREAMING_SNAKE_CASE = """The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ) self.to(device=kwargs["""device"""] ) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = decay SCREAMING_SNAKE_CASE = min_decay SCREAMING_SNAKE_CASE = update_after_step SCREAMING_SNAKE_CASE = use_ema_warmup SCREAMING_SNAKE_CASE = inv_gamma SCREAMING_SNAKE_CASE = power SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = None # set in `step()` SCREAMING_SNAKE_CASE = model_cls SCREAMING_SNAKE_CASE = model_config @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[Any] ) -> "EMAModel": '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = model_cls.load_config(lowerCamelCase__ ,return_unused_kwargs=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model_cls.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = cls(model.parameters() ,model_cls=lowerCamelCase__ ,model_config=model.config ) ema_model.load_state_dict(lowerCamelCase__ ) return ema_model def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tuple ) -> List[str]: '''simple docstring''' if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) SCREAMING_SNAKE_CASE = self.model_cls.from_config(self.model_config ) SCREAMING_SNAKE_CASE = self.state_dict() state_dict.pop("""shadow_params""" ,lowerCamelCase__ ) model.register_to_config(**lowerCamelCase__ ) self.copy_to(model.parameters() ) model.save_pretrained(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ) -> float: '''simple docstring''' SCREAMING_SNAKE_CASE = max(0 ,optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: SCREAMING_SNAKE_CASE = 1 - (1 + step / self.inv_gamma) ** -self.power else: SCREAMING_SNAKE_CASE = (1 + step) / (10 + step) SCREAMING_SNAKE_CASE = min(lowerCamelCase__ ,self.decay ) # make sure decay is not smaller than min_decay SCREAMING_SNAKE_CASE = max(lowerCamelCase__ ,self.min_decay ) return cur_decay_value @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Iterable[torch.nn.Parameter] ) -> str: '''simple docstring''' if isinstance(lowerCamelCase__ ,torch.nn.Module ): SCREAMING_SNAKE_CASE = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" ,"""1.0.0""" ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = parameters.parameters() SCREAMING_SNAKE_CASE = list(lowerCamelCase__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
SCREAMING_SNAKE_CASE = self.get_decay(self.optimization_step ) SCREAMING_SNAKE_CASE = decay SCREAMING_SNAKE_CASE = 1 - decay SCREAMING_SNAKE_CASE = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params ,lowerCamelCase__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): SCREAMING_SNAKE_CASE = deepspeed.zero.GatheredParameters(lowerCamelCase__ ,modifier_rank=lowerCamelCase__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = list(lowerCamelCase__ ) for s_param, param in zip(self.shadow_params ,lowerCamelCase__ ): param.data.copy_(s_param.to(param.device ).data ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : str=None ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = [ p.to(device=lowerCamelCase__ ,dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ ) for p in self.shadow_params ] def SCREAMING_SNAKE_CASE__ ( self : str ) -> dict: '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = [param.detach().cpu().clone() for param in parameters] def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params ,lowerCamelCase__ ): param.data.copy_(c_param.data ) # Better memory-wise. 
SCREAMING_SNAKE_CASE = None def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : dict ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = state_dict.get("""decay""" ,self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) SCREAMING_SNAKE_CASE = state_dict.get("""min_decay""" ,self.min_decay ) if not isinstance(self.min_decay ,lowerCamelCase__ ): raise ValueError("""Invalid min_decay""" ) SCREAMING_SNAKE_CASE = state_dict.get("""optimization_step""" ,self.optimization_step ) if not isinstance(self.optimization_step ,lowerCamelCase__ ): raise ValueError("""Invalid optimization_step""" ) SCREAMING_SNAKE_CASE = state_dict.get("""update_after_step""" ,self.update_after_step ) if not isinstance(self.update_after_step ,lowerCamelCase__ ): raise ValueError("""Invalid update_after_step""" ) SCREAMING_SNAKE_CASE = state_dict.get("""use_ema_warmup""" ,self.use_ema_warmup ) if not isinstance(self.use_ema_warmup ,lowerCamelCase__ ): raise ValueError("""Invalid use_ema_warmup""" ) SCREAMING_SNAKE_CASE = state_dict.get("""inv_gamma""" ,self.inv_gamma ) if not isinstance(self.inv_gamma ,(float, int) ): raise ValueError("""Invalid inv_gamma""" ) SCREAMING_SNAKE_CASE = state_dict.get("""power""" ,self.power ) if not isinstance(self.power ,(float, int) ): raise ValueError("""Invalid power""" ) SCREAMING_SNAKE_CASE = state_dict.get("""shadow_params""" ,lowerCamelCase__ ) if shadow_params is not None: SCREAMING_SNAKE_CASE = shadow_params if not isinstance(self.shadow_params ,lowerCamelCase__ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(lowerCamelCase__ ,torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
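# Worked example of the warmup decay schedule in `get_decay` above:
# cur = 1 - (1 + step / inv_gamma) ** -power, clamped to [min_decay, decay].
inv_gamma, power, decay, min_decay = 1.0, 2 / 3, 0.9999, 0.0
for step in (1, 10, 100, 1000):
    cur = 1 - (1 + step / inv_gamma) ** -power
    print(step, round(max(min(cur, decay), min_decay), 4))
# ramps from ~0.37 at step 1 toward the 0.9999 cap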
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]: '''simple docstring''' super().__init__( features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = Generator( cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None self.builder.download_and_prepare( download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,) SCREAMING_SNAKE_CASE = self.builder.as_dataset( split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory ) return dataset
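# The reader above backs `Dataset.from_generator`; a minimal usage sketch,
# assuming a recent `datasets` release:
from datasets import Dataset


def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}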
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[str] = TextToVideoSDPipeline __snake_case : int = TEXT_TO_IMAGE_PARAMS __snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __snake_case : Dict = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,) SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]: '''simple docstring''' if str(lowerCamelCase__ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) 
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = """np""" SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) SCREAMING_SNAKE_CASE = pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE = """Spiderman is surfing""" SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames SCREAMING_SNAKE_CASE = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) __snake_case : bool = field( default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) __snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) __snake_case : Optional[float] = field( default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) __snake_case : Optional[str] = field( default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
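# Sketch of parsing such a dataclass with HfArgumentParser. The class and
# flag names below (`Seq2SeqTrainingArguments`, `--sortish_sampler`,
# `--label_smoothing`) are hypothetical readable stand-ins for the obfuscated
# `__snake_case` fields above, not confirmed by the source:
from transformers import HfArgumentParser

parser = HfArgumentParser(Seq2SeqTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    ["--output_dir", "out", "--sortish_sampler", "--label_smoothing", "0.1"]
)
print(training_args.sortish_sampler, training_args.label_smoothing)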
import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class UpperCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ) -> List[Any]: '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE = nn.Linear(3 ,4 ) SCREAMING_SNAKE_CASE = nn.BatchNormad(4 ) SCREAMING_SNAKE_CASE = nn.Linear(4 ,5 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Tuple ) -> Optional[Any]: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(lowerCamelCase__ ) ) ) class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase__ ,model.state_dict() ) SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""index.json""" ) self.assertTrue(os.path.isfile(lowerCamelCase__ ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,F"""{key}.dat""" ) self.assertTrue(os.path.isfile(lowerCamelCase__ ) ) # TODO: add tests on the fact weights are properly loaded def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: SCREAMING_SNAKE_CASE = torch.randn(2 ,3 ,dtype=lowerCamelCase__ ) with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = offload_weight(lowerCamelCase__ ,"""weight""" ,lowerCamelCase__ ,{} ) SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""weight.dat""" ) self.assertTrue(os.path.isfile(lowerCamelCase__ ) ) self.assertDictEqual(lowerCamelCase__ ,{"""weight""": {"""shape""": [2, 3], """dtype""": str(lowerCamelCase__ ).split(""".""" )[1]}} ) SCREAMING_SNAKE_CASE = load_offloaded_weight(lowerCamelCase__ ,index["""weight"""] ) self.assertTrue(torch.equal(lowerCamelCase__ ,lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = ModelForTest() SCREAMING_SNAKE_CASE = model.state_dict() SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """linear2""" not in k} SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """linear2""" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = OffloadedWeightsLoader(state_dict=lowerCamelCase__ ,save_folder=lowerCamelCase__ ) # Every key is there with the right value self.assertEqual(sorted(lowerCamelCase__ ) ,sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCamelCase__ ,weight_map[key] ) ) SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """weight""" in k} SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """weight""" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = OffloadedWeightsLoader(state_dict=lowerCamelCase__ ,save_folder=lowerCamelCase__ ) # Every key is there with the right value self.assertEqual(sorted(lowerCamelCase__ ) ,sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCamelCase__ ,weight_map[key] ) ) with 
TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCamelCase__ ,lowerCamelCase__ ) # Duplicates are removed SCREAMING_SNAKE_CASE = OffloadedWeightsLoader(state_dict=lowerCamelCase__ ,save_folder=lowerCamelCase__ ) # Every key is there with the right value self.assertEqual(sorted(lowerCamelCase__ ) ,sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCamelCase__ ,weight_map[key] ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = {"""a.1""": 0, """a.10""": 1, """a.2""": 2} SCREAMING_SNAKE_CASE = extract_submodules_state_dict(lowerCamelCase__ ,["""a.1""", """a.2"""] ) self.assertDictEqual(lowerCamelCase__ ,{"""a.1""": 0, """a.2""": 2} ) SCREAMING_SNAKE_CASE = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2} SCREAMING_SNAKE_CASE = extract_submodules_state_dict(lowerCamelCase__ ,["""a.1""", """a.2"""] ) self.assertDictEqual(lowerCamelCase__ ,{"""a.1.a""": 0, """a.2.a""": 2} )
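# Minimal round trip of the offload helpers exercised above, assuming
# `accelerate` is installed:
import torch
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"w": torch.randn(2, 3)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)           # writes .dat files + index.json
    loader = OffloadedWeightsLoader(save_folder=tmp_dir)
    print(torch.equal(loader["w"], state_dict["w"]))  # True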
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[str] = ["input_features"] def __init__( self : str ,lowerCamelCase__ : Union[str, Any]=80 ,lowerCamelCase__ : str=16000 ,lowerCamelCase__ : List[Any]=160 ,lowerCamelCase__ : Optional[int]=30 ,lowerCamelCase__ : Optional[int]=400 ,lowerCamelCase__ : Any=0.0 ,lowerCamelCase__ : Tuple=False ,**lowerCamelCase__ : str ,) -> int: '''simple docstring''' super().__init__( feature_size=lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,padding_value=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = n_fft SCREAMING_SNAKE_CASE = hop_length SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = chunk_length * sampling_rate SCREAMING_SNAKE_CASE = self.n_samples // hop_length SCREAMING_SNAKE_CASE = sampling_rate SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=lowerCamelCase__ ,min_frequency=0.0 ,max_frequency=8000.0 ,sampling_rate=lowerCamelCase__ ,norm="""slaney""" ,mel_scale="""slaney""" ,) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : np.array ) -> np.ndarray: '''simple docstring''' SCREAMING_SNAKE_CASE = spectrogram( lowerCamelCase__ ,window_function(self.n_fft ,"""hann""" ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel="""log10""" ,) SCREAMING_SNAKE_CASE = log_spec[:, :-1] SCREAMING_SNAKE_CASE = np.maximum(lowerCamelCase__ ,log_spec.max() - 8.0 ) SCREAMING_SNAKE_CASE = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : List[np.ndarray] ,lowerCamelCase__ : List[np.ndarray] ,lowerCamelCase__ : float = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: SCREAMING_SNAKE_CASE = np.array(lowerCamelCase__ ,np.intaa ) SCREAMING_SNAKE_CASE = [] for vector, length in zip(lowerCamelCase__ ,attention_mask.sum(-1 ) ): SCREAMING_SNAKE_CASE = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: SCREAMING_SNAKE_CASE = padding_value normed_input_values.append(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self : Dict ,lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[str] = "max_length" ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,**lowerCamelCase__ : int ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of 
{self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) SCREAMING_SNAKE_CASE = isinstance(lowerCamelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(lowerCamelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase__ ,np.ndarray ): SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ,dtype=np.floataa ) elif isinstance(lowerCamelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] SCREAMING_SNAKE_CASE = BatchFeature({"""input_features""": raw_speech} ) # convert into correct format for padding SCREAMING_SNAKE_CASE = self.pad( lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=max_length if max_length else self.n_samples ,truncation=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=return_attention_mask or do_normalize ,) # zero-mean and unit-variance normalization if do_normalize: SCREAMING_SNAKE_CASE = self.zero_mean_unit_var_norm( padded_inputs["""input_features"""] ,attention_mask=padded_inputs["""attention_mask"""] ,padding_value=self.padding_value ,) SCREAMING_SNAKE_CASE = np.stack(padded_inputs["""input_features"""] ,axis=0 ) # make sure list is in array format SCREAMING_SNAKE_CASE = padded_inputs.get("""input_features""" ).transpose(2 ,0 ,1 ) SCREAMING_SNAKE_CASE = [self._np_extract_fbank_features(lowerCamelCase__ ) for waveform in input_features[0]] if isinstance(input_features[0] ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ,dtype=np.floataa ) for feature in input_features] else: SCREAMING_SNAKE_CASE = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) SCREAMING_SNAKE_CASE = padded_inputs["""attention_mask"""][:, :: self.hop_length] if return_tensors is not None: SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(lowerCamelCase__ ) return padded_inputs def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
296
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Lazily expose the symbols declared in _import_structure; each submodule is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
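# Hypothetical usage sketch, not part of the llama __init__ above: with the lazy-module
# pattern, importing a name from the package only loads the submodule that defines it.
# An installed transformers package exposing LlamaConfig is assumed.
from transformers.models.llama import LlamaConfig  # loads configuration_llama only

config = LlamaConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
print(config.model_type)  # "llama"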
296
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : List[str]=18 ,lowerCamelCase__ : Tuple=30 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Dict=[0.5, 0.5, 0.5] ,lowerCamelCase__ : Dict=[0.5, 0.5, 0.5] ,) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18} SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = min_resolution SCREAMING_SNAKE_CASE = max_resolution SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = do_center_crop SCREAMING_SNAKE_CASE = crop_size SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean SCREAMING_SNAKE_CASE = image_std def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Optional[Any] = LevitImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = LevitImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""size""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def 
SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ ,Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ ,np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ ,torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
296
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first ``n`` lines of every file in ``src_dir`` to a file of the same name in ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
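# Hypothetical usage sketch, not part of the script above (directory names are made up).
# fire.Fire(minify) exposes the function signature as a CLI, roughly:
#   python minify_dataset.py --src_dir wmt_en_ro --dest_dir wmt_en_ro_min --n 128
# The same truncation can be done programmatically:
minify("wmt_en_ro", "wmt_en_ro_min", n=128)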
296
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 
7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
296
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' try: AutoConfig.register("""custom""" ,lowerCamelCase__ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""model""" ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoConfig.register("""bert""" ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "new-model" try: AutoConfig.register("""new-model""" ,lowerCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
296
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "table-transformer" __snake_case : Union[str, Any] = ["past_key_values"] __snake_case : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) # set timm attributes to None SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.d_model class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' return 12
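# Hypothetical usage sketch, separate from the configuration file above; it assumes the
# public transformers API exposing TableTransformerConfig / TableTransformerModel.
from transformers import TableTransformerConfig, TableTransformerModel

config = TableTransformerConfig(d_model=256, encoder_layers=6, decoder_layers=6)
model = TableTransformerModel(config)  # randomly initialised weights, no checkpoint download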
296
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git_vision_model" def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase__ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ ) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "git" def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]: '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = tie_word_embeddings SCREAMING_SNAKE_CASE = num_image_with_embedding SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
296
1
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 637_8137.0
AXIS_B = 635_6752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points given in decimal degrees (WGS84 constants)."""
    # Convert geodetic latitudes to reduced (parametric) latitudes on the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
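# Hypothetical usage sketch, not part of the file above: the coordinates are illustrative
# (roughly San Francisco and New York City); the function returns metres.
sf = (37.7749, -122.4194)
nyc = (40.7128, -74.0060)
print(f"{haversine_distance(sf[0], sf[1], nyc[0], nyc[1]) / 1000:.0f} km")  # on the order of 4100 km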
296
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = spectrogram_length SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = num_audio_channels SCREAMING_SNAKE_CASE = hop_length SCREAMING_SNAKE_CASE = chunk_length SCREAMING_SNAKE_CASE = sampling_rate def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str: '''simple docstring''' def _flatten(lowerCamelCase__ : List[Any] ): return list(itertools.chain(*lowerCamelCase__ ) ) if equal_length: SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : List[Any] = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) ) 
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0] check_json_file_has_correct_format(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE = feature_extractor( lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = TvltFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
296
1
def is_arithmetic_series(series: list) -> bool:
    """Return True if ``series`` is an arithmetic progression, e.g. [2, 4, 6]."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the values in ``series``."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
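# Hypothetical usage sketch, not part of the file above; it only calls the two functions
# defined there.
print(is_arithmetic_series([2, 4, 6, 8]))  # True  (common difference 2)
print(is_arithmetic_series([2, 4, 7]))     # False (difference changes from 2 to 3)
print(arithmetic_mean([2, 4, 6, 8]))       # 5.0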
296
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 
7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
296
1