Dataset schema (dataset-viewer export):

    code                     string  (lengths 81 - 54k)
    code_codestyle           int64   (0 - 721)
    style_context            string  (lengths 91 - 41.9k)
    style_context_codestyle  int64   (0 - 699)
    label                    int64   (0 - 1)
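A minimal loading sketch for rows of this shape, assuming the dump corresponds to a Hugging Face dataset (the dataset id below is a placeholder assumption, not taken from this dump):

import datasets  # pip install datasets

# Placeholder id -- substitute the repository this dump was exported from.
ds = datasets.load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # first 120 characters of the code sample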
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


__snake_case :Any = logging.get_logger(__name__)

__snake_case :List[str] = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : Dict = '''layoutlmv3'''

    def __init__( self : int , __SCREAMING_SNAKE_CASE : str=50_265 , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-5 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=1_024 , __SCREAMING_SNAKE_CASE : Dict=128 , __SCREAMING_SNAKE_CASE : Union[str, Any]=128 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Union[str, Any]=64 , __SCREAMING_SNAKE_CASE : Optional[int]=256 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Tuple=224 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : str , ):
        '''simple docstring'''
        super().__init__( vocab_size=__SCREAMING_SNAKE_CASE , hidden_size=__SCREAMING_SNAKE_CASE , num_hidden_layers=__SCREAMING_SNAKE_CASE , num_attention_heads=__SCREAMING_SNAKE_CASE , intermediate_size=__SCREAMING_SNAKE_CASE , hidden_act=__SCREAMING_SNAKE_CASE , hidden_dropout_prob=__SCREAMING_SNAKE_CASE , attention_probs_dropout_prob=__SCREAMING_SNAKE_CASE , max_position_embeddings=__SCREAMING_SNAKE_CASE , type_vocab_size=__SCREAMING_SNAKE_CASE , initializer_range=__SCREAMING_SNAKE_CASE , layer_norm_eps=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        __a = max_ad_position_embeddings
        __a = coordinate_size
        __a = shape_size
        __a = has_relative_attention_bias
        __a = rel_pos_bins
        __a = max_rel_pos
        __a = has_spatial_attention_bias
        __a = rel_ad_pos_bins
        __a = max_rel_ad_pos
        __a = text_embed
        __a = visual_embed
        __a = input_size
        __a = num_channels
        __a = patch_size
        __a = classifier_dropout


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : str = version.parse('''1.12''' )

    @property
    def _lowerCamelCase ( self : Optional[int]):
        '''simple docstring'''
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
                    ('''bbox''', {0: '''batch''', 1: '''sequence'''}),
                    ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ])
        else:
            return OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                    ('''bbox''', {0: '''batch''', 1: '''sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
                    ('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
                ])

    @property
    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        return 1E-5

    @property
    def _lowerCamelCase ( self : Dict):
        '''simple docstring'''
        return 12

    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : "ProcessorMixin" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 40 , __SCREAMING_SNAKE_CASE : int = 40 , ):
        '''simple docstring'''
        setattr(processor.image_processor , '''apply_ocr''' , __SCREAMING_SNAKE_CASE)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        __a = compute_effective_axis_dimension( __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __a = processor.tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE)
        __a = compute_effective_axis_dimension( __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE)

        # Generate dummy inputs according to compute batch and sequence
        __a = [[''' '''.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        __a = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        __a = self._generate_dummy_images(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

        __a = dict( processor( __SCREAMING_SNAKE_CASE , text=__SCREAMING_SNAKE_CASE , boxes=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , ))

        return inputs
code_codestyle: 60
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNetaDModel


__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ])
__snake_case :Union[str, Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ])
__snake_case :str = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ])
__snake_case :List[Any] = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ])
__snake_case :Any = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ])
__snake_case :List[str] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ])
__snake_case :Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ])
__snake_case :Tuple = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ])
__snake_case :List[Any] = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ])
__snake_case :Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ])
__snake_case :List[str] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ])
__snake_case :Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ])
__snake_case :List[str] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ])
__snake_case :Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ])
# fmt: on

__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]

        print(f'Started running {mod.modelId}!!!')

        if mod.modelId.startswith('''CompVis'''):
            __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        __snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            __snake_case :Any = model(noise, time_step).sample

        assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 )
        print(f'{mod.modelId} has passed successfully!!!')
style_context_codestyle: 60
label: 1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


__snake_case :int = logging.get_logger(__name__)

__snake_case :Union[str, Any] = {
    '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : Optional[int] = '''deta'''
    UpperCamelCase__ : Any = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self : str , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : str=900 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2_048 , __SCREAMING_SNAKE_CASE : Optional[Any]=6 , __SCREAMING_SNAKE_CASE : Optional[Any]=2_048 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : Dict=6 , __SCREAMING_SNAKE_CASE : List[Any]=1_024 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int="relu" , __SCREAMING_SNAKE_CASE : Dict=256 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1.0 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Tuple="sine" , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=300 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Dict=1 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : int=0.25 , **__SCREAMING_SNAKE_CASE : Dict , ):
        '''simple docstring'''
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
            __a = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''])
        else:
            if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
                __a = backbone_config.pop('''model_type''')
                __a = CONFIG_MAPPING[backbone_model_type]
                __a = config_class.from_dict(__SCREAMING_SNAKE_CASE)

        __a = backbone_config
        __a = num_queries
        __a = max_position_embeddings
        __a = d_model
        __a = encoder_ffn_dim
        __a = encoder_layers
        __a = encoder_attention_heads
        __a = decoder_ffn_dim
        __a = decoder_layers
        __a = decoder_attention_heads
        __a = dropout
        __a = attention_dropout
        __a = activation_dropout
        __a = activation_function
        __a = init_std
        __a = init_xavier_std
        __a = encoder_layerdrop
        __a = auxiliary_loss
        __a = position_embedding_type
        # deformable attributes
        __a = num_feature_levels
        __a = encoder_n_points
        __a = decoder_n_points
        __a = two_stage
        __a = two_stage_num_proposals
        __a = with_box_refine
        __a = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        __a = class_cost
        __a = bbox_cost
        __a = giou_cost
        # Loss coefficients
        __a = mask_loss_coefficient
        __a = dice_loss_coefficient
        __a = bbox_loss_coefficient
        __a = giou_loss_coefficient
        __a = eos_coefficient
        __a = focal_alpha
        super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    @property
    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        return self.d_model

    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        __a = copy.deepcopy(self.__dict__)
        __a = self.backbone_config.to_dict()
        __a = self.__class__.model_type
        return output
code_codestyle: 60
from collections.abc import Generator
from math import sin


def __snake_case ( _UpperCAmelCase ):
    if len(_UpperCAmelCase ) != 32:
        raise ValueError('''Input must be of length 32''' )

    __a = b''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def __snake_case ( _UpperCAmelCase ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )

    __a = format(_UpperCAmelCase , '''08x''' )[-8:]
    __a = b''''''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex


def __snake_case ( _UpperCAmelCase ):
    __a = b''''''
    for char in message:
        bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' )
    __a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' )

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(_UpperCAmelCase ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )

    return bit_string


def __snake_case ( _UpperCAmelCase ):
    if len(_UpperCAmelCase ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )

    for pos in range(0 , len(_UpperCAmelCase ) , 512 ):
        __a = bit_string[pos : pos + 512]
        __a = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words


def __snake_case ( _UpperCAmelCase ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )

    __a = format(_UpperCAmelCase , '''032b''' )
    __a = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(_UpperCAmelCase , 2 )


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    return (a + b) % 2**32


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def __snake_case ( _UpperCAmelCase ):
    __a = preprocess(_UpperCAmelCase )
    __a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]

    # Starting states
    __a = 0X67_452_301
    __a = 0Xef_cda_b89
    __a = 0X98_bad_cfe
    __a = 0X10_325_476

    __a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(_UpperCAmelCase ):
        __a = aa
        __a = ba
        __a = ca
        __a = da

        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                __a = d ^ (b & (c ^ d))
                __a = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)    # Alternate definition for f
                __a = c ^ (d & (b ^ c))
                __a = (5 * i + 1) % 16
            elif i <= 47:
                __a = b ^ c ^ d
                __a = (3 * i + 5) % 16
            else:
                __a = c ^ (b | not_aa(_UpperCAmelCase ))
                __a = (7 * i) % 16
            __a = (f + a + added_consts[i] + block_words[g]) % 2**32
            __a = d
            __a = c
            __a = b
            __a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) )

        # Add hashed chunk to running total
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )

    __a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase )
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
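The final def in the cell above is the full digest routine (md5_me in the pre-obfuscation source; that name is an assumption). The anonymization collapses distinct parameter names into one identifier, so the cell no longer runs as written, but the algorithm it encodes is standard MD5, which hashlib reproduces:

import hashlib

msg = b"hello world"
# The reference value the cell's digest function is meant to produce:
print(hashlib.md5(msg).hexdigest())  # 5eb63bbbe01eeed093cb22bb8f5acdc3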
style_context_codestyle: 60
label: 1
from __future__ import annotations

__snake_case :Optional[Any] = []


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    for i in range(len(_UpperCAmelCase ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(_UpperCAmelCase ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
        if board[i][j] == 1:
            return False
    return True


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    if row >= len(_UpperCAmelCase ):
        solution.append(_UpperCAmelCase )
        printboard(_UpperCAmelCase )
        print()
        return True
    for i in range(len(_UpperCAmelCase ) ):
        if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
            __a = 1
            solve(_UpperCAmelCase , row + 1 )
            __a = 0
    return False


def __snake_case ( _UpperCAmelCase ):
    for i in range(len(_UpperCAmelCase ) ):
        for j in range(len(_UpperCAmelCase ) ):
            if board[i][j] == 1:
                print('''Q''' , end=''' ''' )
            else:
                print('''.''' , end=''' ''' )
        print()


# n=int(input("The no. of queens"))
__snake_case :Optional[Any] = 8
__snake_case :Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
code_codestyle: 60
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


__snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

__snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
__snake_case :List[Any] = '''zero2'''
__snake_case :Optional[Any] = '''zero3'''
__snake_case :str = [ZEROa, ZEROa]


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    __a = parameterized.to_safe_name('''_'''.join(str(_UpperCAmelCase ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'


# Cartesian-product of zero stages with models to test
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
        '''simple docstring'''
        self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    @require_torch_multi_gpu
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''simple docstring'''
        self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
        '''simple docstring'''
        self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    @require_torch_multi_gpu
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''simple docstring'''
        self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
        '''simple docstring'''
        pass

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
        '''simple docstring'''
        __a = models[model]
        __a = self.run_trainer( stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
        self.do_checks(__SCREAMING_SNAKE_CASE)
        return output_dir

    def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
        '''simple docstring'''
        __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
        __a = F'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '.split()

        if fpaa:
            args.extend(['''--fp16'''])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        __a = self.get_launcher(__SCREAMING_SNAKE_CASE)

        __a = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())

        return output_dir

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
        '''simple docstring'''
        __a = min(2 , get_gpu_count()) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
style_context_codestyle: 60
label: 1
from torch import nn


def __snake_case ( _UpperCAmelCase ):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}' )
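De-obfuscated, this cell is a small string-to-module activation factory (get_activation in the original source; the name is an assumption). A runnable equivalent with an example call:

from torch import nn

def get_activation(act_fn: str) -> nn.Module:
    # Map a config string to the corresponding torch activation module.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    if act_fn == "mish":
        return nn.Mish()
    if act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f"Unsupported activation function: {act_fn}")

print(get_activation("gelu"))  # prints the GELU module repr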
code_codestyle: 60
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ):
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        __a = f'Expected string as input, found {type(_UpperCAmelCase )}'
        raise ValueError(_UpperCAmelCase )
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        __a = f'Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}'
        raise ValueError(_UpperCAmelCase )

    __a = input_str.split('''_''' )

    __a = 0 if use_pascal else 1

    __a = words[start_index:]

    __a = [word[0].upper() + word[1:] for word in words_to_capitalize]

    __a = '''''' if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
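De-obfuscated, the cell is a snake_case to camelCase/PascalCase converter (the function and parameter names below are assumptions; the logic mirrors the cell). A runnable equivalent with example calls:

def snake_to_camel(input_str: str, use_pascal: bool = False) -> str:
    # Split on underscores, capitalize every word after the start index,
    # and keep the first word lowercase unless PascalCase is requested.
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [w[0].upper() + w[1:] for w in words[start_index:]]
    initial = "" if use_pascal else words[0]
    return "".join([initial, *capitalized])

print(snake_to_camel("some_random_name"))                   # someRandomName
print(snake_to_camel("some_random_name", use_pascal=True))  # SomeRandomName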
style_context_codestyle: 60
label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


__snake_case :List[str] = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :List[str] = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :int = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :List[Any] = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    __snake_case :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 60
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


__snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')


@total_ordering
@dataclass
class _A :
    UpperCamelCase__ : str
    UpperCamelCase__ : Optional[str] = None
    UpperCamelCase__ : Optional[Union[str, int]] = None
    UpperCamelCase__ : Optional[Union[str, int]] = None
    UpperCamelCase__ : Optional[Union[str, int]] = None

    def _lowerCamelCase ( self : Union[str, Any]):
        '''simple docstring'''
        __a , __a , __a = _str_to_version_tuple(self.version_str)

    def __repr__( self : Tuple):
        '''simple docstring'''
        return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def _lowerCamelCase ( self : Optional[int]):
        '''simple docstring'''
        return self.major, self.minor, self.patch

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]):
        '''simple docstring'''
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            return Version(__SCREAMING_SNAKE_CASE)
        elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            return other
        raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.')

    def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any):
        '''simple docstring'''
        try:
            __a = self._validate_operand(__SCREAMING_SNAKE_CASE)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple):
        '''simple docstring'''
        __a = self._validate_operand(__SCREAMING_SNAKE_CASE)
        return self.tuple < other.tuple

    def __hash__( self : Optional[Any]):
        '''simple docstring'''
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]):
        '''simple docstring'''
        __a = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _lowerCamelCase ( self : int):
        '''simple docstring'''
        return self.version_str


def __snake_case ( _UpperCAmelCase ):
    __a = _VERSION_REG.match(_UpperCAmelCase )
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )


def __snake_case ( _UpperCAmelCase ):
    return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
style_context_codestyle: 60
label: 1
import os
import sys
import unittest


__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')

__snake_case :Any = '''
{0} = None
'''

__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
'''

__snake_case :str = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''


class _A ( unittest.TestCase ):
    def _lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        __a = find_backend('''    _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(__SCREAMING_SNAKE_CASE)

        __a = find_backend('''    if not is_tokenizers_available():''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')

        __a = find_backend('''    if not is_tensorflow_text_available():''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')

        __a = find_backend('''    if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')

        __a = find_backend( '''    if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')

        __a = find_backend( '''    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')

    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        __a = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
        self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
        self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertModel''' , objects['''tf'''])
        self.assertIn('''FlaxBertModel''' , objects['''flax'''])
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])

    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')

        __a = create_dummy_object('''function''' , '''\'torch\'''')
        self.assertEqual( __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''')

        __a = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        __a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
code_codestyle: 60
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


__snake_case :int = ''''''

if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):

    class _A ( tr.AbstractTransform ):
        def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
            '''simple docstring'''
            __a = sentence_delimiter

        def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
            '''simple docstring'''
            return list(__SCREAMING_SNAKE_CASE)

        def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
            '''simple docstring'''
            __a = []
            for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
                chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    __snake_case :Any = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] )
else:
    __snake_case :Optional[int] = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score.
'''

__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence'''),
                    '''references''': datasets.Value('''string''' , id='''sequence'''),
                }) ,
            codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,
            reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ] , )

    def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
        '''simple docstring'''
        if concatenate_texts:
            return jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]

        __a = 0
        __a = 0
        for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            __a = jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
style_context_codestyle: 60
label: 1
__snake_case :Union[str, Any] = 8.3_1_4_4_6_2  # Unit - J mol-1 K-1


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
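Both functions in this cell solve the ideal gas law pV = nRT for one variable (pressure and volume respectively). A runnable de-obfuscated sketch of the pressure form, with a worked call (the function name is an assumption):

UNIVERSAL_GAS_CONSTANT = 8.314462  # J mol^-1 K^-1

def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    # p = nRT / V
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume

print(pressure_of_gas_system(2, 100, 5))  # 332.57848 (Pa, for SI inputs)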
code_codestyle: 60
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


__snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :List[str] = ['''ViTFeatureExtractor''']
    __snake_case :Optional[Any] = ['''ViTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :str = [
        '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTForImageClassification''',
        '''ViTForMaskedImageModeling''',
        '''ViTModel''',
        '''ViTPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :Tuple = [
        '''TFViTForImageClassification''',
        '''TFViTModel''',
        '''TFViTPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :Tuple = [
        '''FlaxViTForImageClassification''',
        '''FlaxViTModel''',
        '''FlaxViTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    __snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 60
label: 1
from multiprocessing import Lock, Pipe, Process


# lock used to ensure that two processes do not access a pipe at the same time
__snake_case :Optional[int] = Lock()


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(_UpperCAmelCase )
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            __a = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            __a = min(_UpperCAmelCase , _UpperCAmelCase )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(_UpperCAmelCase )
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            __a = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            __a = max(_UpperCAmelCase , _UpperCAmelCase )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(_UpperCAmelCase )


def __snake_case ( _UpperCAmelCase ):
    __a = []
    __a = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    __a = Pipe()
    __a = Pipe()
    process_array_.append( Process( target=_UpperCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    __a = temp_rs
    __a = temp_rr

    for i in range(1 , len(_UpperCAmelCase ) - 1 ):
        __a = Pipe()
        __a = Pipe()
        process_array_.append( Process( target=_UpperCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        __a = temp_rs
        __a = temp_rr

    process_array_.append( Process( target=_UpperCAmelCase , args=( len(_UpperCAmelCase ) - 1, arr[len(_UpperCAmelCase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_UpperCAmelCase ) - 1], ) , ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0 , len(_UpperCAmelCase ) ):
        __a = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def __snake_case ( ):
    __a = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*_UpperCAmelCase )
    __a = odd_even_transposition(_UpperCAmelCase )
    print('''Sorted List\n''' )
    print(*_UpperCAmelCase )


if __name__ == "__main__":
    main()
code_codestyle: 60
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')


@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
    UpperCamelCase__ : List[str] = GPTSwaTokenizer
    UpperCamelCase__ : Dict = False
    UpperCamelCase__ : int = True
    UpperCamelCase__ : List[Any] = False

    def _lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
        tokenizer.save_pretrained(self.tmpdirname)

    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
        '''simple docstring'''
        __a = '''This is a test'''
        __a = '''This is a test'''
        return input_text, output_text

    def _lowerCamelCase ( self : Dict):
        '''simple docstring'''
        __a = '''<s>'''
        __a = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        __a = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''<unk>''')
        self.assertEqual(vocab_keys[1] , '''<s>''')
        self.assertEqual(vocab_keys[-1] , '''j''')
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000)

    def _lowerCamelCase ( self : Dict):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 2_000)

    def _lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)

        __a = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842])

        __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        # fmt: off
        self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
        # fmt: on

        __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
        self.assertListEqual( __SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
        # fmt: off
        self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
        # fmt: on

    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
        __a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
        __a = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)

    @slow
    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        __a = [
            '''<|python|>def fibonacci(n)\n    if n < 0:\n        print(\'Incorrect input\')''',
            '''Hey there, how are you doing this fine day?''',
            '''This is a text with a trailing spaces followed by a dot     .''',
            '''Häj sväjs lillebrör! =)''',
            '''Det är inget fel på Mr. Cool''',
        ]
        # fmt: off
        __a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
style_context_codestyle: 60
label: 1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


__snake_case :List[Any] = logging.get_logger(__name__)

__snake_case :Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case :Any = {
    '''vocab_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
    },
    '''added_tokens.json''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
    },
    '''merges_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
    },
}

__snake_case :Any = {
    '''RUCAIBox/mvp''': 1024,
}


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
    UpperCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : Dict = ['''input_ids''', '''attention_mask''']
    UpperCamelCase__ : Optional[int] = MvpTokenizer

    def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int="replace" , __SCREAMING_SNAKE_CASE : List[Any]="<s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : List[Any] , ):
        '''simple docstring'''
        super().__init__( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

        __a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE) != add_prefix_space:
            __a = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type'''))
            __a = add_prefix_space
            __a = pre_tok_class(**__SCREAMING_SNAKE_CASE)

        __a = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        __a = '''post_processor'''
        __a = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        if tokenizer_component_instance:
            __a = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                __a = tuple(state['''sep'''])
            if "cls" in state:
                __a = tuple(state['''cls'''])

            __a = False

            if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE) != add_prefix_space:
                __a = add_prefix_space
                __a = True

            if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE) != trim_offsets:
                __a = trim_offsets
                __a = True

            if changes_to_apply:
                __a = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type'''))
                __a = component_class(**__SCREAMING_SNAKE_CASE)
                setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

    @property
    def _lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
        '''simple docstring'''
        __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else value
        __a = value

    def _lowerCamelCase ( self : Tuple , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Dict):
        '''simple docstring'''
        __a = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' '''to use it with pretokenized inputs.''')
        return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Dict):
        '''simple docstring'''
        __a = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' '''to use it with pretokenized inputs.''')
        return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
        '''simple docstring'''
        __a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
        return tuple(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=None):
        '''simple docstring'''
        __a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
        '''simple docstring'''
        __a = [self.sep_token_id]
        __a = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
from __future__ import annotations

__snake_case :Optional[Any] = []


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    for i in range(len(_UpperCAmelCase ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(_UpperCAmelCase ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
        if board[i][j] == 1:
            return False
    return True


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    if row >= len(_UpperCAmelCase ):
        solution.append(_UpperCAmelCase )
        printboard(_UpperCAmelCase )
        print()
        return True
    for i in range(len(_UpperCAmelCase ) ):
        if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
            __a = 1
            solve(_UpperCAmelCase , row + 1 )
            __a = 0
    return False


def __snake_case ( _UpperCAmelCase ):
    for i in range(len(_UpperCAmelCase ) ):
        for j in range(len(_UpperCAmelCase ) ):
            if board[i][j] == 1:
                print('''Q''' , end=''' ''' )
            else:
                print('''.''' , end=''' ''' )
        print()


# n=int(input("The no. of queens"))
__snake_case :Optional[Any] = 8
__snake_case :Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
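Since every def above shares the mangled name __snake_case, the module cannot run as written; here is a compact, runnable sketch of the same row-by-row backtracking idea (all names illustrative):

def count_solutions(n: int) -> int:
    solutions = 0

    def solve(row: int, cols: set, diag1: set, diag2: set) -> None:
        nonlocal solutions
        if row == n:                      # a queen was placed in every row
            solutions += 1
            return
        for col in range(n):
            # A square is safe if its column and both diagonals are unused.
            if col in cols or (row - col) in diag1 or (row + col) in diag2:
                continue
            solve(row + 1, cols | {col}, diag1 | {row - col}, diag2 | {row + col})

    solve(0, set(), set(), set())
    return solutions

print(count_solutions(8))  # expected: 92 solutions for the 8-queens puzzle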
from __future__ import annotations

__snake_case :List[str] = 10


def __snake_case ( _UpperCAmelCase ):
    __a = 1
    __a = max(_UpperCAmelCase )
    while placement <= max_digit:
        # declare and initialize empty buckets
        __a = [[] for _ in range(_UpperCAmelCase )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            __a = int((i / placement) % RADIX )
            buckets[tmp].append(_UpperCAmelCase )
        # put each buckets' contents into list_of_ints
        __a = 0
        for b in range(_UpperCAmelCase ):
            for i in buckets[b]:
                __a = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
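A readable, runnable reconstruction of the same LSD radix sort, with the mangled assignment targets restored to plausible names (integer division replaces the float division above; both agree for non-negative ints):

def radix_sort(values: list[int]) -> list[int]:
    RADIX = 10
    placement = 1
    max_digit = max(values)
    while placement <= max_digit:
        buckets = [[] for _ in range(RADIX)]          # one bucket per digit 0-9
        for value in values:
            buckets[(value // placement) % RADIX].append(value)
        index = 0
        for bucket in buckets:                        # read buckets back in order
            for value in bucket:
                values[index] = value
                index += 1
        placement *= RADIX                            # move to the next digit
    return values

print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]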
def __snake_case ( _UpperCAmelCase ):
    __a = ''''''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def __snake_case ( _UpperCAmelCase ):
    __a = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    __a = remove_duplicates(key.upper() )
    __a = len(_UpperCAmelCase )
    # First fill cipher with key characters
    __a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(_UpperCAmelCase ) , 26 ):
        __a = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            __a = alphabet[i - offset]
        __a = char
    return cipher_alphabet


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    __a = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )


def __snake_case ( ):
    __a = input('''Enter message to encode or decode: ''' ).strip()
    __a = input('''Enter keyword: ''' ).strip()
    __a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        __a = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    __a = create_cipher_map(_UpperCAmelCase )
    print(func(_UpperCAmelCase , _UpperCAmelCase ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
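A hypothetical round trip, assuming the mangled defs above are restored to the create_cipher_map / encipher / decipher names they reference internally:

# Hypothetical round trip; the keyword and message are arbitrary samples.
cipher_map = create_cipher_map("Diffie")       # keyword seeds the substitution alphabet
secret = encipher("Hello World!!", cipher_map)
print(secret)                                  # substitution-ciphered text
print(decipher(secret, cipher_map))            # HELLO WORLD!! (round trip, uppercased)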
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( _UpperCAmelCase ): __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0 __a = image[None].transpose(0 , 3 , 1 , 2 ) __a = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class _A ( __UpperCAmelCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE) @torch.no_grad() def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = 1 elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): __a = image.shape[0] else: raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}') if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = preprocess(__SCREAMING_SNAKE_CASE) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters()).dtype __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE) __a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE) # set timesteps and move to the correct device self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(__SCREAMING_SNAKE_CASE): # concat latents and low resolution image in the channel dimension. 
__a = torch.cat([latents, image] , dim=1) __a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # predict the noise residual __a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample __a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
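A minimal usage sketch of this super-resolution pipeline (the checkpoint id is the public LDM x4 upscaler; any compatible VQVAE+UNet weights would do, and the input file name is a placeholder):

import numpy as np
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Placeholder low-resolution input; preprocess() above snaps sizes to multiples of 32.
low_res = Image.fromarray(np.zeros((128, 128, 3), dtype=np.uint8))
out = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
out.save("upscaled.png")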
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __snake_case :List[Any] = None __snake_case :Dict = logging.get_logger(__name__) __snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case :Union[str, Any] = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __snake_case :Optional[Any] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __snake_case :Optional[int] = '''▁''' class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : str = ['''input_ids''', '''attention_mask'''] UpperCamelCase__ : Dict = BarthezTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = vocab_file __a = False if not self.vocab_file else True def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep) * [0] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(__SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE) return (out_vocab_file,)
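A brief usage sketch (assumes transformers and the public moussaKam/barthez checkpoint):

from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
enc = tokenizer("Les transformateurs sont puissants.")
# Single sequences are wrapped as <s> ... </s>, per build_inputs_with_special_tokens above.
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))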
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __snake_case :str = get_logger() __snake_case :Optional[dict] = None class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' super().__init__(features=__SCREAMING_SNAKE_CASE) import jax from jaxlib.xla_client import Device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): raise ValueError( F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` ' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') __a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default ' F'device: {str(jax.devices()[0])}.') __a = str(jax.devices()[0]) __a = jnp_array_kwargs @staticmethod def _lowerCamelCase ( ): '''simple docstring''' import jax return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()} def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column: if all( isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0) return column def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))): return value elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() __a = {} if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: __a = {'''dtype''': jnp.intaa} else: __a = {'''dtype''': jnp.intaa} elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): __a = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = np.asarray(__SCREAMING_SNAKE_CASE) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a 
global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs}) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array): __a = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)): return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) return self._tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict): '''simple docstring''' return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE) return self.recursive_tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0]) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) __a = self._consolidate(__SCREAMING_SNAKE_CASE) return column def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for column_name in batch: __a = self._consolidate(batch[column_name]) return batch
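A short usage sketch: in the datasets library this formatter is reached through with_format (assumes a datasets build with JAX support and jax installed):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
# with_format("jax") routes __getitem__ through the formatter above, so columns
# come back as jax.numpy arrays on the selected device.
ds = ds.with_format("jax")
print(type(ds[0]["x"]))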
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def __snake_case ( _UpperCAmelCase ): __a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0] @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(rows * cols * num_images ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) __a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 ) return data @deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = labels_dense.shape[0] __a = numpy.arange(_UpperCAmelCase ) * num_classes __a = numpy.zeros((num_labels, num_classes) ) __a = 1 return labels_one_hot @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(_UpperCAmelCase ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase ) return labels class _A : @deprecated( __SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' __a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda) __a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype) if fake_data: __a = 10_000 __a = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' __a = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __a = images.reshape( images.shape[0] , images.shape[1] * images.shape[2]) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__a = images.astype(numpy.floataa) __a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0) __a = images __a = labels __a = 0 __a = 0 @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self._images @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self._labels @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self._num_examples @property def _lowerCamelCase ( self : str): '''simple docstring''' return self._epochs_completed def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True): '''simple docstring''' if fake_data: __a = [1] * 784 __a = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__SCREAMING_SNAKE_CASE)], [fake_label for _ in range(__SCREAMING_SNAKE_CASE)], ) __a = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perma] __a = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __a = self._num_examples - start __a = self._images[start : self._num_examples] __a = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perm] __a = self.labels[perm] # Start next epoch __a = 0 __a = batch_size - rest_num_examples __a = self._index_in_epoch __a = self._images[start:end] __a = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0), ) else: self._index_in_epoch += batch_size __a = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not gfile.Exists(_UpperCAmelCase ): gfile.MakeDirs(_UpperCAmelCase ) __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not gfile.Exists(_UpperCAmelCase ): urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310 with gfile.GFile(_UpperCAmelCase ) as f: __a = f.size() print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' ) return filepath @deprecated( _UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase ) __a = fake() __a = fake() __a = fake() return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase ) if not source_url: # empty string check __a = DEFAULT_SOURCE_URL __a = '''train-images-idx3-ubyte.gz''' __a = '''train-labels-idx1-ubyte.gz''' __a = '''t10k-images-idx3-ubyte.gz''' __a = '''t10k-labels-idx1-ubyte.gz''' __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as 
f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) if not 0 <= validation_size <= len(_UpperCAmelCase ): __a = ( '''Validation size should be between 0 and ''' f'{len(_UpperCAmelCase )}. Received: {validation_size}.' ) raise ValueError(_UpperCAmelCase ) __a = train_images[:validation_size] __a = train_labels[:validation_size] __a = train_images[validation_size:] __a = train_labels[validation_size:] __a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
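A hypothetical usage sketch, assuming the final function above keeps its conventional read_data_sets name (and the _DataSet batching method its conventional next_batch name) from the deprecated TensorFlow tutorial code:

# Hypothetical call; the download directory is a placeholder.
mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5_000)
images, labels = mnist.train.next_batch(100)   # one shuffled mini-batch
print(images.shape, labels.shape)              # (100, 784) (100, 10)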
from collections.abc import Generator
from math import sin


def __snake_case ( _UpperCAmelCase ):
    if len(_UpperCAmelCase ) != 32:
        raise ValueError('''Input must be of length 32''' )
    __a = b''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def __snake_case ( _UpperCAmelCase ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    __a = format(_UpperCAmelCase , '''08x''' )[-8:]
    __a = b''''''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex


def __snake_case ( _UpperCAmelCase ):
    __a = b''''''
    for char in message:
        bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' )
    __a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(_UpperCAmelCase ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string


def __snake_case ( _UpperCAmelCase ):
    if len(_UpperCAmelCase ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0 , len(_UpperCAmelCase ) , 512 ):
        __a = bit_string[pos : pos + 512]
        __a = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words


def __snake_case ( _UpperCAmelCase ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    __a = format(_UpperCAmelCase , '''032b''' )
    __a = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(_UpperCAmelCase , 2 )


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    return (a + b) % 2**32


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def __snake_case ( _UpperCAmelCase ):
    __a = preprocess(_UpperCAmelCase )
    __a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]

    # Starting states
    __a = 0X67_452_301
    __a = 0Xef_cda_b89
    __a = 0X98_bad_cfe
    __a = 0X10_325_476

    __a = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(_UpperCAmelCase ):
        __a = aa
        __a = ba
        __a = ca
        __a = da

        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                __a = d ^ (b & (c ^ d))
                __a = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                __a = c ^ (d & (b ^ c))
                __a = (5 * i + 1) % 16
            elif i <= 47:
                __a = b ^ c ^ d
                __a = (3 * i + 5) % 16
            else:
                __a = c ^ (b | not_aa(_UpperCAmelCase ))
                __a = (7 * i) % 16
            __a = (f + a + added_consts[i] + block_words[g]) % 2**32
            __a = d
            __a = c
            __a = b
            __a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) )

        # Add hashed chunk to running total
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
        __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )

    __a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase )
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
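The defs above all share the mangled name __snake_case, so the module is not runnable as-is; as a reference point, any correct MD5 implementation must agree with the standard library:

import hashlib

# Well-known test vector for MD5.
message = b"The quick brown fox jumps over the lazy dog"
print(hashlib.md5(message).hexdigest())  # 9e107d9d372bb6826bd81d3542a419d6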
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _A ( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ): '''simple docstring''' __a = size if size is not None else {'''height''': 20, '''width''': 20} __a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_center_crop __a = crop_size __a = do_normalize __a = image_mean __a = image_std __a = do_reduce_labels def _lowerCamelCase ( self : str): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(dataset[0]['''file'''] ) __a = Image.open(dataset[1]['''file'''] ) return image, map def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(ds[0]['''file'''] ) __a = Image.open(ds[1]['''file'''] ) __a = Image.open(ds[2]['''file'''] ) __a = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : int): '''simple docstring''' __a = BeitImageProcessingTester(self) @property def _lowerCamelCase ( self : int): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''')) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20}) 
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) __a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) __a = [] for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input __a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test not batched input (PIL images) __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched input (PIL images) __a , __a = prepare_semantic_batch_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels 
should be between 0 and 150 __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 150) __a = True __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255)
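A minimal sketch of what these tests exercise, processing an image together with a segmentation map (dummy all-zero inputs, default processor settings):

import numpy as np
from PIL import Image
from transformers import BeitImageProcessor

processor = BeitImageProcessor()                                  # default resize/crop settings
image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))    # dummy RGB image
seg_map = Image.fromarray(np.zeros((32, 32), dtype=np.uint8))     # dummy label map

enc = processor(image, seg_map, return_tensors="pt")
print(enc["pixel_values"].shape, enc["labels"].shape)             # cropped to the configured size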
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __snake_case :Union[str, Any] = logging.get_logger(__name__) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = WavaVecaForSequenceClassification.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase ) __a = downstream_dict['''projector.weight'''] __a = downstream_dict['''projector.bias'''] __a = downstream_dict['''model.post_net.linear.weight'''] __a = downstream_dict['''model.post_net.linear.bias'''] return model def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase ) __a = downstream_dict['''model.linear.weight'''] __a = downstream_dict['''model.linear.bias'''] return model def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = WavaVecaForXVector.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase ) __a = downstream_dict['''connector.weight'''] __a = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __a = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] __a = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] __a = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] __a = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] __a = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] __a = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] __a = downstream_dict['''objective.W'''] return model @torch.no_grad() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = torch.load(_UpperCAmelCase , map_location='''cpu''' ) __a = checkpoint['''Downstream'''] __a = WavaVecaConfig.from_pretrained(_UpperCAmelCase ) __a = WavaVecaFeatureExtractor.from_pretrained( _UpperCAmelCase , return_attention_mask=_UpperCAmelCase , do_normalize=_UpperCAmelCase ) __a = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): __a = convert_classification(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) elif arch.endswith('''ForAudioFrameClassification''' ): __a = convert_diarization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) elif arch.endswith('''ForXVector''' ): __a = convert_xvector(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: __a = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(_UpperCAmelCase ) hf_model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __snake_case :List[Any] = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') __snake_case :str = parser.parse_args() 
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
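A hypothetical invocation with placeholder paths; arguments are positional because the parameter names above are mangled:

# Hypothetical call; every path and the base checkpoint name are placeholders.
convert_saprl_checkpoint(
    "facebook/wav2vec2-base",      # base_model_name
    "./classifier_config.json",    # config_path (hypothetical)
    "./s3prl_downstream.ckpt",     # checkpoint_path (hypothetical)
    "./converted_model",           # model_dump_path
)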
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class _A ( __UpperCAmelCase ):
    def _lowerCamelCase ( self : int):
        '''simple docstring'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _lowerCamelCase ( self : Tuple):
        '''simple docstring'''
        __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
        for i, r in enumerate(__SCREAMING_SNAKE_CASE):
            self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])

    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info , dset_from_dict.info)

    def _lowerCamelCase ( self : int):  # checks what happens with missing columns
        '''simple docstring'''
        __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertDictEqual(dset[0] , {'''col_1''': 1})
        self.assertDictEqual(dset[1] , {'''col_1''': None})  # NB: first record is used for columns

    def _lowerCamelCase ( self : Optional[Any]):  # checks if the type can be inferred from the second record
        '''simple docstring'''
        __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))

    def _lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        __a = Dataset.from_list([])
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
        self.assertListEqual(dset.column_names , [])
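A minimal usage sketch of the Dataset.from_list behavior these tests cover:

from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
ds = Dataset.from_list(records)        # schema is inferred from the first record
print(ds.column_names)                 # ['col_1', 'col_2']
print(ds[0])                           # {'col_1': 3, 'col_2': 'a'}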
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = '''Speech2TextFeatureExtractor''' UpperCamelCase__ : Any = '''Speech2TextTokenizer''' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.feature_extractor __a = False def __call__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''') __a = kwargs.pop('''raw_speech''') else: __a = kwargs.pop('''audio''' , __SCREAMING_SNAKE_CASE) __a = kwargs.pop('''sampling_rate''' , __SCREAMING_SNAKE_CASE) __a = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE) if len(__SCREAMING_SNAKE_CASE) > 0: __a = args[0] __a = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''') if audio is not None: __a = self.feature_extractor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) if text is not None: __a = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) if text is None: return inputs elif audio is None: return encodings else: __a = encodings['''input_ids'''] return inputs def _lowerCamelCase ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) @contextmanager def _lowerCamelCase ( self : str): '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''') __a = True __a = self.tokenizer yield __a = self.feature_extractor __a = False
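A short usage sketch (assumes transformers and the public facebook/s2t-small-librispeech-asr checkpoint; the audio array is dummy silence):

import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

audio = np.zeros(16_000, dtype=np.float32)   # one second of dummy audio at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, text="a transcript")
# `input_features` come from the feature extractor; `labels` from the tokenizer.
print(sorted(inputs.keys()))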
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __snake_case ( _UpperCAmelCase ): __a = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def __snake_case ( _UpperCAmelCase ): __a = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') ) return token def __snake_case ( ): __a = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": __a = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": __a = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __a = [2, 2, 20] __a = [3, 12, 16] __a = [192, 768, 1024] __a = CvtForImageClassification(_UpperCAmelCase ) __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) __a = image_size __a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) ) __a = OrderedDict() __a = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __a = list_of_state_dict + cls_token(_UpperCAmelCase ) __a = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase ) __a = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __a = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __snake_case :str = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case :Dict = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
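A hypothetical invocation mirroring the argparse defaults above; the .pth file must be downloaded from the linked model zoo and the dump path is a placeholder:

# Hypothetical call; arguments are positional because the parameter names are mangled.
convert_cvt_checkpoint(
    "cvt-w24",                                  # cvt_model variant (13 / 21 / w24)
    384,                                        # image_size
    r"cvtmodels\CvT-w24-384x384-IN-22k.pth",    # cvt_file_name (downloaded checkpoint)
    "./cvt-w24-converted",                      # pytorch_dump_folder_path (placeholder)
)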
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=0 ): __a = [] for old_item in old_list: __a = old_item.replace('''in_layers.0''' , '''norm1''' ) __a = new_item.replace('''in_layers.2''' , '''conv1''' ) __a = new_item.replace('''out_layers.0''' , '''norm2''' ) __a = new_item.replace('''out_layers.3''' , '''conv2''' ) __a = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) __a = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) __a = shave_segments(_UpperCAmelCase , n_shave_prefix_segments=_UpperCAmelCase ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=0 ): __a = [] for old_item in old_list: __a = old_item __a = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) __a = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) __a = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) __a = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) __a = shave_segments(_UpperCAmelCase , n_shave_prefix_segments=_UpperCAmelCase ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None ): assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __a = old_checkpoint[path] __a = old_tensor.shape[0] // 3 __a = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __a = old_tensor.shape[0] // config['''num_head_channels'''] // 3 __a = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __a , __a , __a = old_tensor.split(channels // num_heads , dim=1 ) __a = query.reshape(_UpperCAmelCase ) __a = key.reshape(_UpperCAmelCase ) __a = value.reshape(_UpperCAmelCase ) for path in paths: __a = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __a = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) __a = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) __a = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: __a = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __a = old_checkpoint[path['''old''']][:, :, 0] else: __a = old_checkpoint[path['''old''']] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = {} __a = checkpoint['''time_embed.0.weight'''] __a = checkpoint['''time_embed.0.bias'''] __a = checkpoint['''time_embed.2.weight'''] __a = checkpoint['''time_embed.2.bias'''] __a = checkpoint['''input_blocks.0.0.weight'''] __a = checkpoint['''input_blocks.0.0.bias'''] __a = checkpoint['''out.0.weight'''] __a = checkpoint['''out.0.bias'''] __a = checkpoint['''out.2.weight'''] __a = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only __a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) __a = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(_UpperCAmelCase ) } # Retrieves the keys for the middle blocks only __a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) __a = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(_UpperCAmelCase ) } # Retrieves the keys for the output blocks only __a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) __a = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(_UpperCAmelCase ) } for i in range(1 , _UpperCAmelCase ): __a = (i - 1) // (config['''num_res_blocks'''] + 1) __a = (i - 1) % (config['''num_res_blocks'''] + 1) __a = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] __a = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: __a = checkpoint[ f'input_blocks.{i}.0.op.weight' ] __a = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue __a = renew_resnet_paths(_UpperCAmelCase ) __a = {'''old''': f'input_blocks.{i}.0', '''new''': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} __a = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=_UpperCAmelCase ) if len(_UpperCAmelCase ): __a = renew_attention_paths(_UpperCAmelCase ) __a = { '''old''': 
f'input_blocks.{i}.1', '''new''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } __a = { f'input_blocks.{i}.1.qkv.bias': { '''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { '''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=_UpperCAmelCase , config=_UpperCAmelCase , ) __a = middle_blocks[0] __a = middle_blocks[1] __a = middle_blocks[2] __a = renew_resnet_paths(_UpperCAmelCase ) assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , config=_UpperCAmelCase ) __a = renew_resnet_paths(_UpperCAmelCase ) assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , config=_UpperCAmelCase ) __a = renew_attention_paths(_UpperCAmelCase ) __a = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , attention_paths_to_split=_UpperCAmelCase , config=_UpperCAmelCase ) for i in range(_UpperCAmelCase ): __a = i // (config['''num_res_blocks'''] + 1) __a = i % (config['''num_res_blocks'''] + 1) __a = [shave_segments(_UpperCAmelCase , 2 ) for name in output_blocks[i]] __a = {} for layer in output_block_layers: __a , __a = layer.split('''.''' )[0], shave_segments(_UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(_UpperCAmelCase ) else: __a = [layer_name] if len(_UpperCAmelCase ) > 1: __a = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] __a = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] __a = renew_resnet_paths(_UpperCAmelCase ) __a = renew_resnet_paths(_UpperCAmelCase ) __a = {'''old''': f'output_blocks.{i}.0', '''new''': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , config=_UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __a = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) __a = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] __a = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. 
if len(_UpperCAmelCase ) == 2: __a = [] if len(_UpperCAmelCase ): __a = renew_attention_paths(_UpperCAmelCase ) __a = { '''old''': f'output_blocks.{i}.1', '''new''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } __a = { f'output_blocks.{i}.1.qkv.bias': { '''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { '''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=_UpperCAmelCase , ) else: __a = renew_resnet_paths(_UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __a = '''.'''.join(['''output_blocks''', str(_UpperCAmelCase ), path['''old''']] ) __a = '''.'''.join(['''up_blocks''', str(_UpperCAmelCase ), '''resnets''', str(_UpperCAmelCase ), path['''new''']] ) __a = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __snake_case :int = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __snake_case :Union[str, Any] = parser.parse_args() __snake_case :List[Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: __snake_case :Union[str, Any] = json.loads(f.read()) __snake_case :List[str] = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __snake_case :Dict = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __snake_case :Optional[int] = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __snake_case :Tuple = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __snake_case :Optional[int] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
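A sketch of what the converter expects and returns, mirroring the __main__ block above: the input is the raw LDM state dict plus the UNet config dict, and the output is a state dict whose keys match the diffusers UNet (paths below are placeholders).

import json
import torch

state_dict = torch.load("model.ckpt")   # placeholder: original LDM checkpoint
with open("config.json") as f:          # placeholder: matching architecture config
    unet_config = json.loads(f.read())
converted = convert_ldm_checkpoint(state_dict, unet_config)
unet = UNetaDModel(**unet_config)       # UNetaDModel is this file's alias for the diffusers UNet
unet.load_state_dict(converted)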
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( _UpperCAmelCase ): __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0 __a = image[None].transpose(0 , 3 , 1 , 2 ) __a = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class _A ( __UpperCAmelCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE) @torch.no_grad() def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = 1 elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): __a = image.shape[0] else: raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}') if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = preprocess(__SCREAMING_SNAKE_CASE) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters()).dtype __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE) __a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE) # set timesteps and move to the correct device self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(__SCREAMING_SNAKE_CASE): # concat latents and low resolution image in the channel dimension. 
__a = torch.cat([latents, image] , dim=1) __a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # predict the noise residual __a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample __a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
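A usage sketch, assuming this class is exposed as LDMSuperResolutionPipeline in diffusers and that the CompVis/ldm-super-resolution-4x-openimages checkpoint is available; the input image path is a placeholder.

from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]  # VQ-VAE decodes at 4x, so 512x512
upscaled.save("upscaled.png")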
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def __snake_case ( _UpperCAmelCase=None ): if subparsers is not None: __a = subparsers.add_parser('''env''' ) else: __a = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=_UpperCAmelCase , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=_UpperCAmelCase ) return parser def __snake_case ( _UpperCAmelCase ): __a = torch.__version__ __a = torch.cuda.is_available() __a = is_xpu_available() __a = is_npu_available() __a = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(_UpperCAmelCase ): __a = load_config_from_file(args.config_file ).to_dict() __a = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})', '''PyTorch XPU available''': str(_UpperCAmelCase ), '''PyTorch NPU available''': str(_UpperCAmelCase ), '''System RAM''': f'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB', } if pt_cuda_available: __a = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([f'- {prop}: {val}' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __a = ( '''\n'''.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else f'\t{accelerate_config}' ) print(_UpperCAmelCase ) __a = accelerate_config return info def __snake_case ( ): __a = env_command_parser() __a = parser.parse_args() env_command(_UpperCAmelCase ) return 0 if __name__ == "__main__": raise SystemExit(main())
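This module backs the `accelerate env` CLI subcommand; the same report can be produced programmatically with the helpers defined above (a minimal sketch).

parser = env_command_parser()
args = parser.parse_args([])  # no --config_file: fall back to the default config file, if present
env_command(args)             # prints the copy-and-paste environment report and returns it as a dict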
from __future__ import annotations from random import random from typing import Generic, TypeVar __snake_case :Any = TypeVar('''KT''') __snake_case :List[str] = TypeVar('''VT''') class _A ( Generic[KT, VT] ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None): '''simple docstring''' __a = key __a = value __a = [] def __repr__( self : Dict): '''simple docstring''' return F'Node({self.key}: {self.value})' @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return len(self.forward) class _A ( Generic[KT, VT] ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16): '''simple docstring''' __a = Node[KT, VT]() __a = 0 __a = p __a = max_level def __str__( self : Union[str, Any]): '''simple docstring''' __a = list(self) if len(__SCREAMING_SNAKE_CASE) == 0: return F'SkipList(level={self.level})' __a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4) __a = max(__SCREAMING_SNAKE_CASE , 4) + 4 __a = self.head __a = [] __a = node.forward.copy() lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) while len(node.forward) != 0: __a = node.forward[0] lines.append( F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) __a = node.forward lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE)) return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE) def __iter__( self : int): '''simple docstring''' __a = self.head while len(node.forward) != 0: yield node.forward[0].key __a = node.forward[0] def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a = [] __a = self.head for i in reversed(range(self.level)): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __a = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__SCREAMING_SNAKE_CASE) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: for i, update_node in enumerate(__SCREAMING_SNAKE_CASE): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __a = node.forward[i] else: __a = update_node.forward[:i] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: __a = value else: __a = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE): update_vector.append(self.head) __a = level __a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for i, update_node in enumerate(update_vector[:level]): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i]) if update_node.level < i + 1: update_node.forward.append(__SCREAMING_SNAKE_CASE) else: __a = new_node def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: return node.value return None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value if len(_UpperCAmelCase ) != 4: print() assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def __snake_case ( ): __a = SkipList() assert skip_list.find('''Some key''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def __snake_case ( ): __a = SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) 
== 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(_UpperCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def __snake_case ( ): def is_sorted(_UpperCAmelCase ): return all(next_item >= item for item, next_item in zip(_UpperCAmelCase , lst[1:] ) ) __a = SkipList() for i in range(10 ): skip_list.insert(_UpperCAmelCase , _UpperCAmelCase ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCAmelCase ) ) def __snake_case ( ): for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __snake_case ( ): __a = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
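A minimal usage sketch for the skip list above, using the same insert/find/delete API the tests exercise.

sl = SkipList()
sl.insert("alpha", 1)
sl.insert("beta", 2)
sl.insert("alpha", 10)   # re-inserting an existing key overwrites its value
print(sl.find("alpha"))  # 10
sl.delete("beta")
print(sl.find("beta"))   # None
print(list(sl))          # keys in sorted order: ['alpha']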
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case :Optional[Any] = { '''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Optional[int] = ['''AlbertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Optional[int] = ['''AlbertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AlbertForMaskedLM''', '''AlbertForMultipleChoice''', '''AlbertForPreTraining''', '''AlbertForQuestionAnswering''', '''AlbertForSequenceClassification''', '''AlbertForTokenClassification''', '''AlbertModel''', '''AlbertPreTrainedModel''', '''load_tf_weights_in_albert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Dict = [ '''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFAlbertForMaskedLM''', '''TFAlbertForMultipleChoice''', '''TFAlbertForPreTraining''', '''TFAlbertForQuestionAnswering''', '''TFAlbertForSequenceClassification''', '''TFAlbertForTokenClassification''', '''TFAlbertMainLayer''', '''TFAlbertModel''', '''TFAlbertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :List[str] = [ '''FlaxAlbertForMaskedLM''', '''FlaxAlbertForMultipleChoice''', '''FlaxAlbertForPreTraining''', '''FlaxAlbertForQuestionAnswering''', '''FlaxAlbertForSequenceClassification''', '''FlaxAlbertForTokenClassification''', '''FlaxAlbertModel''', '''FlaxAlbertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __snake_case :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
__snake_case :str = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # Standard BFS over the residual graph: return True if the sink `t` is reachable
    # from the source `s`, filling `parent` with each visited node's predecessor.
    __a = [False] * len(_UpperCAmelCase )
    __a = [s]
    __a = True
    while queue:
        __a = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(_UpperCAmelCase )
                __a = True
                __a = u
    return visited[t]


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    __a = [-1] * (len(_UpperCAmelCase ))
    __a = 0
    __a = []
    __a = [i[:] for i in graph]  # Keep a copy of the original capacities.
    while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __a = float('''Inf''' )
        __a = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            __a = min(_UpperCAmelCase , graph[parent[s]][s] )
            __a = parent[s]
        max_flow += path_flow
        __a = sink
        while v != source:
            __a = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            __a = parent[v]
    for i in range(len(_UpperCAmelCase ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
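The module-level test_graph is a 6-node capacity matrix; the helpers work for any square capacity matrix, for example:

small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
# Prints the edges saturated by the maximum flow, i.e. the cut between node 0 and node 3.
print(mincut(small_graph, source=0, sink=3))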
from scipy.stats import pearsonr import datasets __snake_case :List[str] = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' __snake_case :Optional[Any] = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' __snake_case :Any = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=False): '''simple docstring''' if return_pvalue: __a = pearsonr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)[0])}
from __future__ import annotations def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): print(f'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(_UpperCAmelCase ): print(f'{i}\t\t{d}' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for j in range(_UpperCAmelCase ): __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = [float('''inf''' )] * vertex_count __a = 0.0 for _ in range(vertex_count - 1 ): for j in range(_UpperCAmelCase ): __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __a = distance[u] + w __a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() __snake_case :Dict = int(input('''Enter number of vertices: ''').strip()) __snake_case :Any = int(input('''Enter number of edges: ''').strip()) __snake_case :list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) __snake_case ,__snake_case ,__snake_case :int = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) __snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight} __snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip()) __snake_case :Optional[Any] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
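A non-interactive sketch of the same routine, replacing the input() prompts with a small edge list.

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
distances = bellman_ford(edges, 3, len(edges), 0)  # (graph, vertex_count, edge_count, source)
print_distance(distances, 0)                       # distances from vertex 0: [0.0, 4, 1]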
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch __snake_case :Optional[Any] = logging.get_logger(__name__) class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[Any] = ['''pixel_values'''] def __init__( self : Any , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : List[Any] , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) __a = size if size is not None else {'''shortest_edge''': 224} __a = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) __a = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256} __a = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''') __a = do_resize __a = size __a = resample __a = do_rescale __a = rescale_factor __a = do_center_crop __a = crop_size __a = do_flip_channel_order def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PIL.Image.BILINEAR , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' __a = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) if "shortest_edge" not in size: raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}') __a = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = get_size_dict(__SCREAMING_SNAKE_CASE) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}') return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[int, float] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ): '''simple docstring''' return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None): '''simple docstring''' return flip_channel_order(__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = resample if resample is not None else self.resample __a = do_rescale if do_rescale is not None else self.do_rescale __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) __a = size if size is not None else self.size __a = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''') __a = make_list_of_images(__SCREAMING_SNAKE_CASE) if not valid_images(__SCREAMING_SNAKE_CASE): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') # All transformations expect numpy arrays. 
__a = [to_numpy_array(__SCREAMING_SNAKE_CASE) for image in images] if do_resize: __a = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE) for image in images] if do_center_crop: __a = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE) for image in images] if do_rescale: __a = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: __a = [self.flip_channel_order(image=__SCREAMING_SNAKE_CASE) for image in images] __a = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for image in images] __a = {'''pixel_values''': images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Tuple] = None): '''simple docstring''' __a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__SCREAMING_SNAKE_CASE) != len(__SCREAMING_SNAKE_CASE): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''') if is_torch_tensor(__SCREAMING_SNAKE_CASE): __a = target_sizes.numpy() __a = [] for idx in range(len(__SCREAMING_SNAKE_CASE)): __a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE) __a = resized_logits[0].argmax(dim=0) semantic_segmentation.append(__SCREAMING_SNAKE_CASE) else: __a = logits.argmax(dim=1) __a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
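A usage sketch, assuming this processor is exposed as MobileViTImageProcessor in transformers (the BGR channel flip and 256x256 center crop match MobileViT preprocessing); the checkpoint name and image file are placeholders.

from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 256, 256])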
import os import sys import unittest __snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''') __snake_case :Any = ''' {0} = None ''' __snake_case :Dict = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' __snake_case :str = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''') self.assertIsNone(__SCREAMING_SNAKE_CASE) __a = find_backend(''' if not is_tokenizers_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''') __a = find_backend(''' if not is_tensorflow_text_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''') __a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE) self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE) self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertModel''' , objects['''tf''']) self.assertIn('''FlaxBertModel''' , objects['''flax''']) self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text''']) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers''']) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''') __a = create_dummy_object('''function''' , '''\'torch\'''') self.assertEqual( __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''') __a = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __a = create_dummy_object('''FakeClass''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']}) self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case :List[str] = logging.get_logger(__name__) __snake_case :Dict = { '''microsoft/conditional-detr-resnet-50''': ( '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json''' ), } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = '''conditional_detr''' UpperCamelCase__ : str = ['''past_key_values'''] UpperCamelCase__ : Union[str, Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Optional[int]=300 , __SCREAMING_SNAKE_CASE : Tuple=6 , __SCREAMING_SNAKE_CASE : Tuple=2_048 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : int=6 , __SCREAMING_SNAKE_CASE : Optional[Any]=2_048 , __SCREAMING_SNAKE_CASE : Optional[Any]=8 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=256 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : int=1.0 , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Tuple="sine" , __SCREAMING_SNAKE_CASE : Dict="resnet50" , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=5 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.25 , **__SCREAMING_SNAKE_CASE : int , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''') if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''') __a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4''']) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = backbone_config.get('''model_type''') __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(__SCREAMING_SNAKE_CASE) __a = use_timm_backbone __a = backbone_config __a = num_channels __a = num_queries __a = d_model __a = encoder_ffn_dim __a = encoder_layers __a = encoder_attention_heads __a = decoder_ffn_dim __a = decoder_layers __a = decoder_attention_heads __a = dropout __a = attention_dropout __a = activation_dropout __a = activation_function __a = init_std __a = init_xavier_std __a = encoder_layerdrop __a = decoder_layerdrop __a = encoder_layers __a = auxiliary_loss __a = position_embedding_type __a = backbone __a = use_pretrained_backbone __a = dilation # Hungarian matcher __a = class_cost __a = bbox_cost __a = giou_cost # Loss coefficients __a = mask_loss_coefficient __a = dice_loss_coefficient __a = cls_loss_coefficient __a = bbox_loss_coefficient __a = giou_loss_coefficient __a = focal_alpha super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) @property def _lowerCamelCase ( self : Any): '''simple docstring''' return self.encoder_attention_heads @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return self.d_model def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = copy.deepcopy(self.__dict__) if self.backbone_config is not None: __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output class _A ( __UpperCAmelCase ): UpperCamelCase__ : Any = version.parse('''1.11''' ) @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ]) @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return 1E-5 @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return 12
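A configuration sketch, assuming the class above is exposed as ConditionalDetrConfig and paired with ConditionalDetrForObjectDetection in transformers.

from transformers import ConditionalDetrConfig, ConditionalDetrForObjectDetection

config = ConditionalDetrConfig(num_queries=100, d_model=256)
model = ConditionalDetrForObjectDetection(config)  # randomly initialized, not pretrained
print(model.config.num_queries)  # 100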
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __snake_case :str = get_logger() __snake_case :Optional[dict] = None class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' super().__init__(features=__SCREAMING_SNAKE_CASE) import jax from jaxlib.xla_client import Device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): raise ValueError( F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` ' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') __a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default ' F'device: {str(jax.devices()[0])}.') __a = str(jax.devices()[0]) __a = jnp_array_kwargs @staticmethod def _lowerCamelCase ( ): '''simple docstring''' import jax return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()} def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column: if all( isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0) return column def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))): return value elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() __a = {} if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: __a = {'''dtype''': jnp.intaa} else: __a = {'''dtype''': jnp.intaa} elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): __a = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = np.asarray(__SCREAMING_SNAKE_CASE) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a 
global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs}) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array): __a = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)): return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) return self._tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict): '''simple docstring''' return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE) return self.recursive_tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0]) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) __a = self._consolidate(__SCREAMING_SNAKE_CASE) return column def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for column_name in batch: __a = self._consolidate(batch[column_name]) return batch
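For orientation, the formatter above is what backs the "jax" output format of a dataset; the sketch below shows the consumer-side view. It assumes `datasets` and `jax` are installed, and the column name "x" is purely illustrative.

from datasets import Dataset

# Build a toy dataset and request JAX-formatted output (this routes through a formatter like the one above).
ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
ds = ds.with_format("jax")

row = ds[0]     # a dict whose values are jax.Array objects, e.g. {'x': Array([1., 2.], dtype=float32)}
col = ds["x"]   # rows with identical shape/dtype are stacked into a single (2, 2) jax.Array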
60
1
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case :str = logging.get_logger(__name__) __snake_case :Tuple = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = '''efficientnet''' def __init__( self : str , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 600 , __SCREAMING_SNAKE_CASE : float = 2.0 , __SCREAMING_SNAKE_CASE : float = 3.1 , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : List[int] = [3, 3, 5, 3, 5, 5, 3] , __SCREAMING_SNAKE_CASE : List[int] = [32, 16, 24, 40, 80, 112, 192] , __SCREAMING_SNAKE_CASE : List[int] = [16, 24, 40, 80, 112, 192, 320] , __SCREAMING_SNAKE_CASE : List[int] = [] , __SCREAMING_SNAKE_CASE : List[int] = [1, 2, 2, 2, 1, 2, 1] , __SCREAMING_SNAKE_CASE : List[int] = [1, 2, 2, 3, 3, 4, 1] , __SCREAMING_SNAKE_CASE : List[int] = [1, 6, 6, 6, 6, 6, 6] , __SCREAMING_SNAKE_CASE : float = 0.25 , __SCREAMING_SNAKE_CASE : str = "swish" , __SCREAMING_SNAKE_CASE : int = 2_560 , __SCREAMING_SNAKE_CASE : str = "mean" , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : float = 0.0_01 , __SCREAMING_SNAKE_CASE : float = 0.99 , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : float = 0.2 , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) __a = num_channels __a = image_size __a = width_coefficient __a = depth_coefficient __a = depth_divisor __a = kernel_sizes __a = in_channels __a = out_channels __a = depthwise_padding __a = strides __a = num_block_repeats __a = expand_ratios __a = squeeze_expansion_ratio __a = hidden_act __a = hidden_dim __a = pooling_type __a = initializer_range __a = batch_norm_eps __a = batch_norm_momentum __a = dropout_rate __a = drop_connect_rate __a = sum(__SCREAMING_SNAKE_CASE) * 4 class _A ( __UpperCAmelCase ): UpperCamelCase__ : Any = version.parse('''1.11''' ) @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ]) @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return 1E-5
60
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __snake_case :Tuple = logging.getLogger(__name__) if __name__ == "__main__": __snake_case :Union[str, Any] = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0522, type=int) __snake_case :List[str] = parser.parse_args() logger.info(f'Loading data from {args.data_file}') with open(args.data_file, '''rb''') as fp: __snake_case :Optional[Any] = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') __snake_case :Dict = Counter() for tk_ids in data: counter.update(tk_ids) __snake_case :Optional[Any] = [0] * args.vocab_size for k, v in counter.items(): __snake_case :Any = v logger.info(f'Dump to {args.token_counts_dump}') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
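The script above boils down to a single frequency pass over token ids; this self-contained sketch shows the same idea on toy data (the ids and vocabulary size are illustrative).

from collections import Counter

data = [[5, 7, 5], [7, 9]]   # toy "binarized dataset": one list of token ids per sequence
counter = Counter(tk for seq in data for tk in seq)

vocab_size = 10
counts = [counter.get(token_id, 0) for token_id in range(vocab_size)]
# counts[5] == 2, counts[7] == 2, counts[9] == 1, everything else 0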
60
1
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __snake_case :Tuple = '''src/diffusers''' # Matches is_xxx_available() __snake_case :Optional[Any] = re.compile(r'''is\_([a-z_]*)_available\(\)''') # Matches from xxx import bla __snake_case :int = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') __snake_case :int = ''' {0} = None ''' __snake_case :Any = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) ''' __snake_case :List[str] = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' def __snake_case ( _UpperCAmelCase ): __a = _re_backend.findall(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None return "_and_".join(_UpperCAmelCase ) def __snake_case ( ): with open(os.path.join(_UpperCAmelCase , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.readlines() # Get to the point we do the actual imports for type checking __a = 0 __a = {} # Go through the end of the file while line_index < len(_UpperCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __a = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 __a = [] # Until we unindent, add backend objects to the list while line_index < len(_UpperCAmelCase ) and len(lines[line_index] ) > 1: __a = lines[line_index] __a = _re_single_line_import.search(_UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_UpperCAmelCase ) > 0: __a = objects else: line_index += 1 return backend_specific_objects def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if name.isupper(): return DUMMY_CONSTANT.format(_UpperCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(_UpperCAmelCase , _UpperCAmelCase ) else: return DUMMY_CLASS.format(_UpperCAmelCase , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase=None ): if backend_specific_objects is None: __a = read_init() # For special correspondence backend to module name as used in the function requires_modulename __a = {} for backend, objects in backend_specific_objects.items(): __a = '''[''' + ''', '''.join(f'"{b}"' for b in backend.split('''_and_''' ) ) + ''']''' __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_UpperCAmelCase , _UpperCAmelCase ) for o in objects] ) __a = dummy_file return dummy_files def __snake_case ( _UpperCAmelCase=False ): __a = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __a = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. 
__a = os.path.join(_UpperCAmelCase , '''utils''' ) __a = { backend: os.path.join(_UpperCAmelCase , f'dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase )}_objects.py' ) for backend in dummy_files.keys() } __a = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_UpperCAmelCase ): with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.read() else: __a = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'Updating diffusers.utils.dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase )}_objects.py as the main ' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' f'diffusers.utils.dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase )}_objects.py. Run `make fix-copies` ' '''to fix this.''' ) if __name__ == "__main__": __snake_case :int = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __snake_case :Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
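To make the templates above concrete, a torch-only class ends up rendered roughly as follows in utils/dummy_pt_objects.py (the class name is illustrative, not taken from the script).

# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class SomeTorchOnlyModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])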
60
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case :List[str] = HfApi() __snake_case :str = {} # fmt: off __snake_case :Optional[Any] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __snake_case :Union[str, Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __snake_case :str = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __snake_case :List[Any] = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __snake_case :Any = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __snake_case :List[str] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __snake_case :Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __snake_case :Tuple = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __snake_case :List[Any] = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, 
-1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __snake_case :Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __snake_case :Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __snake_case :List[str] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __snake_case :Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __snake_case :List[str] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __snake_case :Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on __snake_case :List[Any] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith('''CompVis'''): __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case :List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case :Any = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
60
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __snake_case :Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __snake_case :Dict = 12_8022 __snake_case :List[Any] = 12_8028 @require_sentencepiece class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Dict = MaMaaaTokenizer UpperCamelCase__ : Any = False UpperCamelCase__ : Tuple = False UpperCamelCase__ : List[Any] = True def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' super().setUp() __a = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] __a = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE)))) __a = Path(self.tmpdirname) save_json(__SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''spm_file''']) __a = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return ( "This is a test", "This is a test", ) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = '''</s>''' __a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = self.get_tokenizer() __a = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(__SCREAMING_SNAKE_CASE) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' pass def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.get_tokenizer() __a = tokenizer.tokenize('''This is a test''') self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [2, 3, 4, 5, 6] , ) __a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) __a = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE) self.assertEqual(__SCREAMING_SNAKE_CASE , '''This is a test''') @slow def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 
2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): UpperCamelCase__ : int = '''facebook/m2m100_418M''' UpperCamelCase__ : int = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCamelCase__ : List[Any] = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCamelCase__ : List[Any] = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2] @classmethod def _lowerCamelCase ( cls : Union[str, Any]): '''simple docstring''' __a = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , 
tgt_lang='''fr''') __a = 1 return cls def _lowerCamelCase ( self : Tuple): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128_006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128_022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128_076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128_063) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.tokenizer.get_vocab() self.assertEqual(len(__SCREAMING_SNAKE_CASE) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = '''en''' __a = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids) # fmt: off __a = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on __a = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE) __a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = tempfile.mkdtemp() __a = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE) __a = MaMaaaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertDictEqual(new_tok.lang_token_to_id , __SCREAMING_SNAKE_CASE) @require_torch def _lowerCamelCase ( self : int): '''simple docstring''' __a = '''en''' __a = '''fr''' __a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') __a = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: __a = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) __a = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def _lowerCamelCase ( self : Any): '''simple docstring''' __a = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) __a = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , 
[self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE) , { # en_XX, A, test, EOS '''input_ids''': [[128_022, 58, 4_183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128_006, } , )
60
from collections.abc import Generator from math import sin def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) != 32: raise ValueError('''Input must be of length 32''' ) __a = b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''08x''' )[-8:] __a = b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def __snake_case ( _UpperCAmelCase ): __a = b'''''' for char in message: bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' ) __a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_UpperCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(_UpperCAmelCase ) , 512 ): __a = bit_string[pos : pos + 512] __a = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''032b''' ) __a = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(_UpperCAmelCase , 2 ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return (a + b) % 2**32 def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __snake_case ( _UpperCAmelCase ): __a = preprocess(_UpperCAmelCase ) __a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __a = 0X67_452_301 __a = 0Xef_cda_b89 __a = 0X98_bad_cfe __a = 0X10_325_476 __a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_UpperCAmelCase ): __a = aa __a = ba __a = ca __a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __a = d ^ (b & (c ^ d)) __a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __a = c ^ (d & (b ^ c)) __a = (5 * i + 1) % 16 elif i <= 47: __a = b ^ c ^ d __a = (3 * i + 5) % 16 else: __a = c ^ (b | not_aa(_UpperCAmelCase )) __a = (7 * i) % 16 __a = (f + a + added_consts[i] + block_words[g]) % 2**32 __a = d __a = c __a = b __a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
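As a sanity check on the routine above, the digest it computes should match the standard library's MD5 on any input; here is a classic test vector (a fact about MD5 itself, not taken from this file).

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
print(hashlib.md5(message).hexdigest())
# 9e107d9d372bb6826bd81d3542a419d6  (the reference digest the hand-rolled implementation should reproduce)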
60
1
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __snake_case :Tuple = logging.getLogger(__name__) if __name__ == "__main__": __snake_case :Union[str, Any] = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0522, type=int) __snake_case :List[str] = parser.parse_args() logger.info(f'Loading data from {args.data_file}') with open(args.data_file, '''rb''') as fp: __snake_case :Optional[Any] = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') __snake_case :Dict = Counter() for tk_ids in data: counter.update(tk_ids) __snake_case :Optional[Any] = [0] * args.vocab_size for k, v in counter.items(): __snake_case :Any = v logger.info(f'Dump to {args.token_counts_dump}') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} __snake_case :List[Any] = '''zero2''' __snake_case :Optional[Any] = '''zero3''' __snake_case :str = [ZEROa, ZEROa] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a = parameterized.to_safe_name('''_'''.join(str(_UpperCAmelCase ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test __snake_case :List[Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _A ( __UpperCAmelCase ): @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' pass def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE 
: bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = models[model] __a = self.run_trainer( stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) self.do_checks(__SCREAMING_SNAKE_CASE) return output_dir def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE) __a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(['''--fp16''']) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __a = self.get_launcher(__SCREAMING_SNAKE_CASE) __a = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env()) return output_dir def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False): '''simple docstring''' __a = min(2 , get_gpu_count()) if distributed else 1 return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
60
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _A : def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=37 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : List[str]=2 , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = is_training __a = use_labels __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = type_sequence_label_size __a = initializer_range __a = scope __a = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __a = (image_size // patch_size) ** 2 __a = num_patches + 2 def _lowerCamelCase ( self : str): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size) __a = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : Any): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' __a = TFDeiTModel(config=__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : 
str , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = TFDeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __a = 1 __a = TFDeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE) __a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self.type_sequence_label_size __a = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __a = 1 __a = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE) __a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase__ : int = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase__ : Optional[int] = False UpperCamelCase__ : Optional[Any] = False UpperCamelCase__ : str = False UpperCamelCase__ : Dict = False def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = TFDeiTModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''') def _lowerCamelCase ( self : int): '''simple docstring''' pass def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__SCREAMING_SNAKE_CASE) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) __a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Dense)) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__SCREAMING_SNAKE_CASE) __a = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE) def 
_lowerCamelCase ( self : str): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=False): '''simple docstring''' __a = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def _lowerCamelCase ( self : List[Any]): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = TFDeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertIsNotNone(__SCREAMING_SNAKE_CASE) def __snake_case ( ): __a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _A ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''') if is_vision_available() else None ) @slow def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''') __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''') # forward pass __a = model(**__SCREAMING_SNAKE_CASE) # verify the logits __a = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE) __a = tf.constant([-1.02_66, 0.19_12, -1.28_61]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
60
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = f'Expected string as input, found {type(_UpperCAmelCase )}' raise ValueError(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = f'Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}' raise ValueError(_UpperCAmelCase ) __a = input_str.split('''_''' ) __a = 0 if use_pascal else 1 __a = words[start_index:] __a = [word[0].upper() + word[1:] for word in words_to_capitalize] __a = '''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
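Restated with readable names, the converter above behaves like this sketch (identifier names here are illustrative).

def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    # Split on underscores, capitalise every word except (optionally) the first, and re-join.
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial = "" if use_pascal else words[0]
    return "".join([initial, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"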
60
1
from sklearn.metrics import mean_squared_error import datasets __snake_case :Tuple = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' __snake_case :List[str] = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' __snake_case :Dict = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : int): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def _lowerCamelCase ( self : Any): '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''')), "references": datasets.Sequence(datasets.Value('''float''')), } else: return { "predictions": datasets.Value('''float'''), "references": datasets.Value('''float'''), } def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="uniform_average" , __SCREAMING_SNAKE_CASE : List[str]=True): '''simple docstring''' __a = mean_squared_error( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , multioutput=__SCREAMING_SNAKE_CASE , squared=__SCREAMING_SNAKE_CASE) return {"mse": mse}
60
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class _A : UpperCamelCase__ : str UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a , __a , __a = _str_to_version_tuple(self.version_str) def __repr__( self : Tuple): '''simple docstring''' return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.major, self.minor, self.patch def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return Version(__SCREAMING_SNAKE_CASE) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return other raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.') def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' try: __a = self._validate_operand(__SCREAMING_SNAKE_CASE) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self._validate_operand(__SCREAMING_SNAKE_CASE) return self.tuple < other.tuple def __hash__( self : Optional[Any]): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple)) @classmethod def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _lowerCamelCase ( self : int): '''simple docstring''' return self.version_str def __snake_case ( _UpperCAmelCase ): __a = _VERSION_REG.match(_UpperCAmelCase ) if not res: raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def __snake_case ( _UpperCAmelCase ): return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
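The parsing helpers above hinge on the three-group regex; a quick illustration of what it accepts and rejects.

import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

match = _VERSION_REG.match("1.12.0")
assert match is not None
assert tuple(int(match.group(g)) for g in ("major", "minor", "patch")) == (1, 12, 0)
assert _VERSION_REG.match("1.12") is None   # a missing patch component is rejected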
60
1
def __snake_case ( _UpperCAmelCase ): __a = [1] __a , __a , __a = 0, 0, 0 __a = ugly_nums[ia] * 2 __a = ugly_nums[ia] * 3 __a = ugly_nums[ia] * 5 for _ in range(1 , _UpperCAmelCase ): __a = min(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ugly_nums.append(_UpperCAmelCase ) if next_num == next_a: ia += 1 __a = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __a = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __a = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'{ugly_numbers(200) = }')
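With descriptive names, the merge-style generation of 2/3/5-smooth ("ugly") numbers above reads as follows; the assertion is a quick spot check.

def ugly_numbers(n: int) -> int:
    ugly = [1]
    i2 = i3 = i5 = 0
    next_2, next_3, next_5 = 2, 3, 5
    for _ in range(1, n):
        smallest = min(next_2, next_3, next_5)
        ugly.append(smallest)
        if smallest == next_2:
            i2 += 1
            next_2 = ugly[i2] * 2
        if smallest == next_3:
            i3 += 1
            next_3 = ugly[i3] * 3
        if smallest == next_5:
            i5 += 1
            next_5 = ugly[i5] * 5
    return ugly[-1]

assert ugly_numbers(10) == 12   # sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12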
60
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata __snake_case :int = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class _A ( tr.AbstractTransform ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "): '''simple docstring''' __a = sentence_delimiter def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return list(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' __a = [] for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE): chars.extend(self.process_string(__SCREAMING_SNAKE_CASE)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1: chars.append(self.sentence_delimiter) return chars __snake_case :Any = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __snake_case :Optional[int] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __snake_case :Optional[int] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' __snake_case :Tuple = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' __snake_case :Tuple = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"] __a = 0 __a = 0 for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
def solution(n: int = 1000) -> int:
    """Project Euler problem 9: return the product a*b*c of the Pythagorean
    triplet (a**2 + b**2 == c**2) whose sum a + b + c equals n, or -1 if no
    such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
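# Added note (not part of the original file): the closed form above follows from
# substituting c = n - a - b into a**2 + b**2 = c**2:
#     a**2 + b**2 = n**2 - 2*n*(a + b) + (a + b)**2
#     0 = n**2 - 2*n*a - 2*n*b + 2*a*b
#     b = (n**2 - 2*a*n) / (2*n - 2*a)
# For n = 1000 this search finds the well-known triplet (200, 375, 425):
assert solution(1000) == 200 * 375 * 425 == 31_875_000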
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# The obfuscation replaced every assignment target with a dummy name, so the
# _import_structure dict referenced at the bottom was never defined; restored here.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _A ( __UpperCAmelCase ): def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : str = "▁" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[str, AddedToken] = "<unk>" , __SCREAMING_SNAKE_CASE : Union[str, AddedToken] = "</s>" , __SCREAMING_SNAKE_CASE : Union[str, AddedToken] = "<pad>" , ): '''simple docstring''' __a = { '''pad''': {'''id''': 0, '''token''': pad_token}, '''eos''': {'''id''': 1, '''token''': eos_token}, '''unk''': {'''id''': 2, '''token''': unk_token}, } __a = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): __a = token_dict['''token'''] __a = Tokenizer(Unigram()) __a = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(''' {2,}''') , ''' '''), normalizers.Lowercase(), ]) __a = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE), pre_tokenizers.Digits(individual_digits=__SCREAMING_SNAKE_CASE), pre_tokenizers.Punctuation(), ]) __a = decoders.Metaspace(replacement=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE) __a = TemplateProcessing( single=F'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , ) __a = { '''model''': '''SentencePieceUnigram''', '''replacement''': replacement, '''add_prefix_space''': add_prefix_space, } super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 8_000 , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = trainers.UnigramTrainer( vocab_size=__SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=__SCREAMING_SNAKE_CASE , ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = [files] self._tokenizer.train(__SCREAMING_SNAKE_CASE , trainer=__SCREAMING_SNAKE_CASE) self.add_unk_id() def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Union[Iterator[str], Iterator[Iterator[str]]] , __SCREAMING_SNAKE_CASE : int = 8_000 , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = trainers.UnigramTrainer( vocab_size=__SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=__SCREAMING_SNAKE_CASE , ) self._tokenizer.train_from_iterator(__SCREAMING_SNAKE_CASE , trainer=__SCREAMING_SNAKE_CASE) self.add_unk_id() def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = json.loads(self._tokenizer.to_str()) __a = self.special_tokens['''unk''']['''id'''] __a = Tokenizer.from_str(json.dumps(__SCREAMING_SNAKE_CASE))
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : List[str] = GPTSwaTokenizer UpperCamelCase__ : Dict = False UpperCamelCase__ : int = True UpperCamelCase__ : List[Any] = False def _lowerCamelCase ( self : List[Any]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = '''This is a test''' __a = '''This is a test''' return input_text, output_text def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = '''<s>''' __a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000) def _lowerCamelCase ( self : Dict): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2_000) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = tokenizer.tokenize('''This is a test''') self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842]) __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) self.assertListEqual( __SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']) # fmt: on def _lowerCamelCase ( self : Any): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] __a = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): 
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # Test that decode_fast returns the input text for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) @slow def _lowerCamelCase ( self : Any): '''simple docstring''' __a = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off __a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    # The truncation flag and the ratio key name were lost in the obfuscated
    # source and are reconstructed here from context.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no previously placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place one queen per row, backtracking whenever no column is safe."""
    if row >= len(board):
        # Store a copy: appending `board` itself would leave every stored
        # solution aliasing the same list, which later undo-steps reset to zeros.
        solution.append([r[:] for r in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
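# Added sanity check (not part of the original file): the n-queens solution
# counts are well known -- 2 for n = 4 and 92 for n = 8.
solution.clear()
solve([[0] * 4 for _ in range(4)], 0)
assert len(solution) == 2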
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : List[Any]=32 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : List[str]=[32, 64, 128] , __SCREAMING_SNAKE_CASE : List[Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : str=2.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Tuple="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-5 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=10 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : str=["stage1", "stage2"] , __SCREAMING_SNAKE_CASE : List[Any]=[1, 2] , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = embed_dim __a = hidden_sizes __a = depths __a = num_heads __a = window_size __a = mlp_ratio __a = qkv_bias __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = drop_path_rate __a = hidden_act __a = use_absolute_embeddings __a = patch_norm __a = layer_norm_eps __a = initializer_range __a = is_training __a = scope __a = use_labels __a = type_sequence_label_size __a = encoder_stride __a = out_features __a = out_indices def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size) __a = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str]): '''simple docstring''' return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = FocalNetModel(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) __a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) __a = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = FocalNetBackbone(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1]) # verify backbone works with out_features=None __a = None __a = FocalNetBackbone(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a = FocalNetForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __a = 1 __a = FocalNetForMaskedImageModeling(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = self.type_sequence_label_size __a = FocalNetForImageClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __a = 1 __a = FocalNetForImageClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __a = model(__SCREAMING_SNAKE_CASE) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Optional[int] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) UpperCamelCase__ : Optional[Any] = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ : Optional[Any] = False UpperCamelCase__ : int = False UpperCamelCase__ : int = False UpperCamelCase__ : Dict = False UpperCamelCase__ : Dict = False def _lowerCamelCase ( self : str): '''simple docstring''' __a = FocalNetModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 , has_text_modality=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE) @unittest.skip(reason='''FocalNet does not use inputs_embeds''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' pass @unittest.skip(reason='''FocalNet does not use feedforward chunking''') def _lowerCamelCase ( self : Any): '''simple docstring''' pass def _lowerCamelCase ( self : Any): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: __a = model_class(__SCREAMING_SNAKE_CASE) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear)) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: __a = model_class(__SCREAMING_SNAKE_CASE) __a = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE) def 
_lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' __a = model_class(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) __a = outputs.hidden_states __a = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1) self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # FocalNet has a different seq_length __a = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) __a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) __a = outputs.reshaped_hidden_states self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) __a , __a , __a , __a = reshaped_hidden_states[0].shape __a = ( reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() __a = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: __a = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() __a = 3 __a = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) __a = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) __a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: __a = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width)) @slow def _lowerCamelCase ( self : str): '''simple docstring''' for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = FocalNetModel.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertIsNotNone(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() __a = _config_zero_init(__SCREAMING_SNAKE_CASE) for model_class in 
self.all_model_classes: __a = model_class(config=__SCREAMING_SNAKE_CASE) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class _A ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''') if is_vision_available() else None @slow def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''').to(__SCREAMING_SNAKE_CASE) __a = self.default_image_processor __a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') __a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE) # forward pass with torch.no_grad(): __a = model(**__SCREAMING_SNAKE_CASE) # verify the logits __a = torch.Size((1, 1_000)) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE) __a = torch.tensor([0.21_66, -0.43_68, 0.21_91]).to(__SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4)) self.assertTrue(outputs.logits.argmax(dim=-1).item() , 281) @require_torch class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else () UpperCamelCase__ : int = FocalNetConfig UpperCamelCase__ : Dict = False def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = FocalNetModelTester(self)
def remove_duplicates(key: str) -> str:
    """Drop repeated alphabetic characters from the key, preserving order
    (spaces are kept).

    >>> remove_duplicates('Hello World!!')
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword-cipher alphabet: the deduplicated key first, then the
    remaining letters of the alphabet in order."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse the mappings to decrypt
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
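# Added round-trip sketch for the keyword cipher above (not part of the
# original file); the ciphertext was worked out by hand from create_cipher_map.
_demo_map = create_cipher_map("Goodbye!!")
assert encipher("Hello World!!", _demo_map) == "CYJJM VMQJB!!"
assert decipher("CYJJM VMQJB!!", _demo_map) == "HELLO WORLD!!"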
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __snake_case :Optional[int] = '''src/diffusers''' __snake_case :List[str] = '''.''' # This is to make sure the diffusers module imported is the one in the repo. __snake_case :Union[str, Any] = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) __snake_case :List[str] = spec.loader.load_module() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return line.startswith(_UpperCAmelCase ) or len(_UpperCAmelCase ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , _UpperCAmelCase ) is not None def __snake_case ( _UpperCAmelCase ): __a = object_name.split('''.''' ) __a = 0 # First let's find the module where our object lives. __a = parts[i] while i < len(_UpperCAmelCase ) and not os.path.isfile(os.path.join(_UpperCAmelCase , f'{module}.py' ) ): i += 1 if i < len(_UpperCAmelCase ): __a = os.path.join(_UpperCAmelCase , parts[i] ) if i >= len(_UpperCAmelCase ): raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' ) with open(os.path.join(_UpperCAmelCase , f'{module}.py' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.readlines() # Now let's find the class / func in the code! __a = '''''' __a = 0 for name in parts[i + 1 :]: while ( line_index < len(_UpperCAmelCase ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_UpperCAmelCase ): raise ValueError(f' {object_name} does not match any function or class in {module}.' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __a = line_index while line_index < len(_UpperCAmelCase ) and _should_continue(lines[line_index] , _UpperCAmelCase ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __a = lines[start_index:line_index] return "".join(_UpperCAmelCase ) __snake_case :int = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') __snake_case :List[str] = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''') __snake_case :int = re.compile(r'''<FILL\s+[^>]*>''') def __snake_case ( _UpperCAmelCase ): __a = code.split('''\n''' ) __a = 0 while idx < len(_UpperCAmelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_UpperCAmelCase ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def __snake_case ( _UpperCAmelCase ): __a = len(get_indent(_UpperCAmelCase ) ) > 0 if has_indent: __a = f'class Bla:\n{code}' __a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_UpperCAmelCase ) __a = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase ) __a , __a = style_docstrings_in_code(_UpperCAmelCase ) return result[len('''class Bla:\n''' ) :] if has_indent else result def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False ): with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __a = f.readlines() __a = [] __a = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). 
while line_index < len(_UpperCAmelCase ): __a = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __a , __a , __a = search.groups() __a = find_code_in_diffusers(_UpperCAmelCase ) __a = get_indent(_UpperCAmelCase ) __a = line_index + 1 if indent == theoretical_indent else line_index + 2 __a = theoretical_indent __a = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __a = True while line_index < len(_UpperCAmelCase ) and should_continue: line_index += 1 if line_index >= len(_UpperCAmelCase ): break __a = lines[line_index] __a = _should_continue(_UpperCAmelCase , _UpperCAmelCase ) and re.search(f'^{indent}# End copy' , _UpperCAmelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __a = lines[start_index:line_index] __a = ''''''.join(_UpperCAmelCase ) # Remove any nested `Copied from` comments to avoid circular copies __a = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_UpperCAmelCase ) is None] __a = '''\n'''.join(_UpperCAmelCase ) # Before comparing, use the `replace_pattern` on the original code. if len(_UpperCAmelCase ) > 0: __a = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __a = [_re_replace_pattern.search(_UpperCAmelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue __a , __a , __a = pattern.groups() __a = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if option.strip() == "all-casing": __a = re.sub(obja.lower() , obja.lower() , _UpperCAmelCase ) __a = re.sub(obja.upper() , obja.upper() , _UpperCAmelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __a = blackify(lines[start_index - 1] + theoretical_code ) __a = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __a = lines[:start_index] + [theoretical_code] + lines[line_index:] __a = start_index + 1 if overwrite and len(_UpperCAmelCase ) > 0: # Warn the user a file has been modified. print(f'Detected changes, rewriting {filename}.' ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_UpperCAmelCase ) return diffs def __snake_case ( _UpperCAmelCase = False ): __a = glob.glob(os.path.join(_UpperCAmelCase , '''**/*.py''' ) , recursive=_UpperCAmelCase ) __a = [] for filename in all_files: __a = is_copy_consistent(_UpperCAmelCase , _UpperCAmelCase ) diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs] if not overwrite and len(_UpperCAmelCase ) > 0: __a = '''\n'''.join(_UpperCAmelCase ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": __snake_case :Dict = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __snake_case :str = parser.parse_args() check_copies(args.fix_and_overwrite)
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __snake_case :List[Any] = None __snake_case :Dict = logging.get_logger(__name__) __snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case :Union[str, Any] = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __snake_case :Optional[Any] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __snake_case :Optional[int] = '''▁''' class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : str = ['''input_ids''', '''attention_mask'''] UpperCamelCase__ : Dict = BarthezTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = vocab_file __a = False if not self.vocab_file else True def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep) * [0] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(__SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE) return (out_vocab_file,)
from __future__ import annotations from collections.abc import Callable __snake_case :List[str] = list[list[float | int]] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = len(_UpperCAmelCase ) __a = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] __a = 42 __a = 42 __a = 42 __a = 42 __a = 42 __a = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): __a = matrix[row][col] __a = vector[row][0] __a = 0 __a = 0 while row < size and col < size: # pivoting __a = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __a , __a = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): __a = augmented[rowa][col] / augmented[row][col] __a = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): __a = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def __snake_case ( _UpperCAmelCase ): __a = len(_UpperCAmelCase ) __a = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] __a = [[0] for _ in range(_UpperCAmelCase )] __a = 42 __a = 42 __a = 42 __a = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): __a = (x_val + 1) ** (size - col - 1) __a = y_val __a = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(_UpperCAmelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def __snake_case ( _UpperCAmelCase ): return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __snake_case ( _UpperCAmelCase = question_function , _UpperCAmelCase = 10 ): __a = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] __a = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __a = 0 __a = 42 __a = 42 for poly in polynomials: __a = 1 while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(f'{solution() = }')
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def __snake_case ( _UpperCAmelCase ): __a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0] @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(rows * cols * num_images ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) __a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 ) return data @deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = labels_dense.shape[0] __a = numpy.arange(_UpperCAmelCase ) * num_classes __a = numpy.zeros((num_labels, num_classes) ) __a = 1 return labels_one_hot @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(_UpperCAmelCase ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase ) return labels class _A : @deprecated( __SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' __a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda) __a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype) if fake_data: __a = 10_000 __a = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' __a = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __a = images.reshape( images.shape[0] , images.shape[1] * images.shape[2]) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__a = images.astype(numpy.floataa) __a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0) __a = images __a = labels __a = 0 __a = 0 @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self._images @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self._labels @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self._num_examples @property def _lowerCamelCase ( self : str): '''simple docstring''' return self._epochs_completed def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True): '''simple docstring''' if fake_data: __a = [1] * 784 __a = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__SCREAMING_SNAKE_CASE)], [fake_label for _ in range(__SCREAMING_SNAKE_CASE)], ) __a = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perma] __a = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __a = self._num_examples - start __a = self._images[start : self._num_examples] __a = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perm] __a = self.labels[perm] # Start next epoch __a = 0 __a = batch_size - rest_num_examples __a = self._index_in_epoch __a = self._images[start:end] __a = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0), ) else: self._index_in_epoch += batch_size __a = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not gfile.Exists(_UpperCAmelCase ): gfile.MakeDirs(_UpperCAmelCase ) __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not gfile.Exists(_UpperCAmelCase ): urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310 with gfile.GFile(_UpperCAmelCase ) as f: __a = f.size() print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' ) return filepath @deprecated( _UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase ) __a = fake() __a = fake() __a = fake() return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase ) if not source_url: # empty string check __a = DEFAULT_SOURCE_URL __a = '''train-images-idx3-ubyte.gz''' __a = '''train-labels-idx1-ubyte.gz''' __a = '''t10k-images-idx3-ubyte.gz''' __a = '''t10k-labels-idx1-ubyte.gz''' __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as 
f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) if not 0 <= validation_size <= len(_UpperCAmelCase ): __a = ( '''Validation size should be between 0 and ''' f'{len(_UpperCAmelCase )}. Received: {validation_size}.' ) raise ValueError(_UpperCAmelCase ) __a = train_images[:validation_size] __a = train_labels[:validation_size] __a = train_images[validation_size:] __a = train_labels[validation_size:] __a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
import tensorflow as tf from ...tf_utils import shape_list class _A ( tf.keras.layers.Layer ): def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Dict=False , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) __a = vocab_size __a = d_embed __a = d_proj __a = cutoffs + [vocab_size] __a = [0] + self.cutoffs __a = div_val __a = self.cutoffs[0] __a = len(self.cutoffs) - 1 __a = self.shortlist_size + self.n_clusters __a = keep_order __a = [] __a = [] def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' if self.n_clusters > 0: __a = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name='''cluster_weight''') __a = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name='''cluster_bias''') if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: __a = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F'out_projs_._{i}' , ) self.out_projs.append(__SCREAMING_SNAKE_CASE) else: self.out_projs.append(__SCREAMING_SNAKE_CASE) __a = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F'out_layers_._{i}_._weight' , ) __a = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): __a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a = self.d_embed // (self.div_val**i) __a = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F'out_projs_._{i}') self.out_projs.append(__SCREAMING_SNAKE_CASE) __a = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F'out_layers_._{i}_._weight' , ) __a = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias)) super().build(__SCREAMING_SNAKE_CASE) @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any]=None): '''simple docstring''' __a = x if proj is not None: __a = tf.einsum('''ibd,ed->ibe''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return tf.einsum('''ibd,nd->ibn''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) + b @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = shape_list(__SCREAMING_SNAKE_CASE) __a = tf.range(lp_size[0] , dtype=target.dtype) __a = tf.stack([r, target] , 1) return tf.gather_nd(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False): '''simple docstring''' __a = 0 if self.n_clusters == 0: __a = self._logit(__SCREAMING_SNAKE_CASE , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0]) 
if target is not None: __a = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE) __a = tf.nn.log_softmax(__SCREAMING_SNAKE_CASE , axis=-1) else: __a = shape_list(__SCREAMING_SNAKE_CASE) __a = [] __a = tf.zeros(hidden_sizes[:2]) for i in range(len(self.cutoffs)): __a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: __a = (target >= l_idx) & (target < r_idx) __a = tf.where(__SCREAMING_SNAKE_CASE) __a = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) - l_idx if self.div_val == 1: __a = self.out_layers[0][0][l_idx:r_idx] __a = self.out_layers[0][1][l_idx:r_idx] else: __a = self.out_layers[i][0] __a = self.out_layers[i][1] if i == 0: __a = tf.concat([cur_W, self.cluster_weight] , 0) __a = tf.concat([cur_b, self.cluster_bias] , 0) __a = self._logit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.out_projs[0]) __a = tf.nn.log_softmax(__SCREAMING_SNAKE_CASE) out.append(head_logprob[..., : self.cutoffs[0]]) if target is not None: __a = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self._gather_logprob(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else: __a = self._logit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.out_projs[i]) __a = tf.nn.log_softmax(__SCREAMING_SNAKE_CASE) __a = self.cutoffs[0] + i - 1 # No probability for the head cluster __a = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__SCREAMING_SNAKE_CASE) if target is not None: __a = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self._gather_logprob(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__SCREAMING_SNAKE_CASE , -cur_logprob , shape_list(__SCREAMING_SNAKE_CASE)) __a = tf.concat(__SCREAMING_SNAKE_CASE , axis=-1) if target is not None: if return_mean: __a = tf.reduce_mean(__SCREAMING_SNAKE_CASE) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__SCREAMING_SNAKE_CASE) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__SCREAMING_SNAKE_CASE , name=self.name , aggregation='''mean''' if return_mean else '''''') return out
60
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _A ( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ): '''simple docstring''' __a = size if size is not None else {'''height''': 20, '''width''': 20} __a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_center_crop __a = crop_size __a = do_normalize __a = image_mean __a = image_std __a = do_reduce_labels def _lowerCamelCase ( self : str): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(dataset[0]['''file'''] ) __a = Image.open(dataset[1]['''file'''] ) return image, map def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(ds[0]['''file'''] ) __a = Image.open(ds[1]['''file'''] ) __a = Image.open(ds[2]['''file'''] ) __a = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : int): '''simple docstring''' __a = BeitImageProcessingTester(self) @property def _lowerCamelCase ( self : int): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''')) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20}) 
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) __a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) __a = [] for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input __a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test not batched input (PIL images) __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched input (PIL images) __a , __a = prepare_semantic_batch_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels 
should be between 0 and 150 __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 150) __a = True __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255)
60
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__snake_case :List[Any] = logging.get_logger(__name__)


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : str = '''encoder-decoder'''
    UpperCamelCase__ : List[Any] = True

    def __init__( self : Any , **__SCREAMING_SNAKE_CASE : int):
        '''simple docstring'''
        super().__init__(**__SCREAMING_SNAKE_CASE)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        __a = kwargs.pop('''encoder''')
        __a = encoder_config.pop('''model_type''')
        __a = kwargs.pop('''decoder''')
        __a = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        __a = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
        __a = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
        __a = True

    @classmethod
    def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : List[Any]):
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        __a = True
        __a = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        __a = copy.deepcopy(self.__dict__)
        __a = self.encoder.to_dict()
        __a = self.decoder.to_dict()
        __a = self.__class__.model_type
        return output
60
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class _A ( __UpperCAmelCase ):
    def _lowerCamelCase ( self : int):
        '''simple docstring'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _lowerCamelCase ( self : Tuple):
        '''simple docstring'''
        __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
        for i, r in enumerate(__SCREAMING_SNAKE_CASE):
            self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])

    def _lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info , dset_from_dict.info)

    def _lowerCamelCase ( self : int):  # checks what happens with missing columns
        '''simple docstring'''
        __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertDictEqual(dset[0] , {'''col_1''': 1})
        self.assertDictEqual(dset[1] , {'''col_1''': None})  # NB: first record is used for columns

    def _lowerCamelCase ( self : Optional[Any]):  # checks if the type can be inferred from the second record
        '''simple docstring'''
        __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))

    def _lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        __a = Dataset.from_list([])
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
        self.assertListEqual(dset.column_names , [])
60
1
import math


def __snake_case ( _UpperCAmelCase ):
    assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    __a = range(3 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=1 , **_UpperCAmelCase ):
    __a = factor * value
    __a = value

    while not is_prime(_UpperCAmelCase ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1 , **_UpperCAmelCase )
    return value
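# Illustrative usage sketch added for clarity; it is not part of the original
# row. It assumes the two functions above correspond to the `is_prime` and
# `next_prime` helpers that their own bodies call (the obfuscated
# `__snake_case` names would otherwise shadow each other as written).
assert is_prime(13)
assert not is_prime(15)
assert next_prime(14) == 17  # scans upward from 14 until a prime is hit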
60
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __snake_case ( _UpperCAmelCase ): __a = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def __snake_case ( _UpperCAmelCase ): __a = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') ) return token def __snake_case ( ): __a = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": __a = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": __a = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __a = [2, 2, 20] __a = [3, 12, 16] __a = [192, 768, 1024] __a = CvtForImageClassification(_UpperCAmelCase ) __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) __a = image_size __a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) ) __a = OrderedDict() __a = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __a = list_of_state_dict + cls_token(_UpperCAmelCase ) __a = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase ) __a = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __a = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __snake_case :str = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case :Dict = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
60
1
import re

from filelock import FileLock


try:
    import nltk

    __snake_case :List[Any] = True
except (ImportError, ModuleNotFoundError):
    __snake_case :Optional[Any] = False

if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def __snake_case ( _UpperCAmelCase ):
    re.sub('''<n>''' , '''''' , _UpperCAmelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_UpperCAmelCase ) )
60
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( _UpperCAmelCase ): __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0 __a = image[None].transpose(0 , 3 , 1 , 2 ) __a = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class _A ( __UpperCAmelCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE) @torch.no_grad() def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = 1 elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): __a = image.shape[0] else: raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}') if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = preprocess(__SCREAMING_SNAKE_CASE) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters()).dtype __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE) __a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE) # set timesteps and move to the correct device self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(__SCREAMING_SNAKE_CASE): # concat latents and low resolution image in the channel dimension. 
__a = torch.cat([latents, image] , dim=1) __a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # predict the noise residual __a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample __a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
60
1
__snake_case :Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

__snake_case :int = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__snake_case :Optional[int] = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
60
from __future__ import annotations from random import random from typing import Generic, TypeVar __snake_case :Any = TypeVar('''KT''') __snake_case :List[str] = TypeVar('''VT''') class _A ( Generic[KT, VT] ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None): '''simple docstring''' __a = key __a = value __a = [] def __repr__( self : Dict): '''simple docstring''' return F'Node({self.key}: {self.value})' @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return len(self.forward) class _A ( Generic[KT, VT] ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16): '''simple docstring''' __a = Node[KT, VT]() __a = 0 __a = p __a = max_level def __str__( self : Union[str, Any]): '''simple docstring''' __a = list(self) if len(__SCREAMING_SNAKE_CASE) == 0: return F'SkipList(level={self.level})' __a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4) __a = max(__SCREAMING_SNAKE_CASE , 4) + 4 __a = self.head __a = [] __a = node.forward.copy() lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) while len(node.forward) != 0: __a = node.forward[0] lines.append( F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) __a = node.forward lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE)) return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE) def __iter__( self : int): '''simple docstring''' __a = self.head while len(node.forward) != 0: yield node.forward[0].key __a = node.forward[0] def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a = [] __a = self.head for i in reversed(range(self.level)): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __a = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__SCREAMING_SNAKE_CASE) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: for i, update_node in enumerate(__SCREAMING_SNAKE_CASE): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __a = node.forward[i] else: __a = update_node.forward[:i] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: __a = value else: __a = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE): update_vector.append(self.head) __a = level __a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for i, update_node in enumerate(update_vector[:level]): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i]) if update_node.level < i + 1: update_node.forward.append(__SCREAMING_SNAKE_CASE) else: __a = new_node def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: return node.value return None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value if len(_UpperCAmelCase ) != 4: print() assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def __snake_case ( ): __a = SkipList() assert skip_list.find('''Some key''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def __snake_case ( ): __a = SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) 
== 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(_UpperCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def __snake_case ( ): def is_sorted(_UpperCAmelCase ): return all(next_item >= item for item, next_item in zip(_UpperCAmelCase , lst[1:] ) ) __a = SkipList() for i in range(10 ): skip_list.insert(_UpperCAmelCase , _UpperCAmelCase ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCAmelCase ) ) def __snake_case ( ): for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __snake_case ( ): __a = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
60
1
from ....utils import logging


__snake_case :Any = logging.get_logger(__name__)


class _A ( __UpperCAmelCase ):
    def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=2_048):
        '''simple docstring'''
        __a = config.__dict__
        __a = modal_hidden_size

        if num_labels:
            __a = num_labels
60
__snake_case :str = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # Returns True if the sink is reachable from the source, i.e. an augmenting path exists.
    __a = [False] * len(_UpperCAmelCase )
    __a = [s]
    __a = True

    while queue:
        __a = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(_UpperCAmelCase )
                __a = True
                __a = u
    return visited[t]


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    __a = [-1] * (len(_UpperCAmelCase ))
    __a = 0
    __a = []
    __a = [i[:] for i in graph]  # Keep a copy of the original capacities so the cut edges can be recovered.

    while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __a = float('''Inf''' )
        __a = sink

        while s != source:
            # Find the minimum residual capacity along the selected path.
            __a = min(_UpperCAmelCase , graph[parent[s]][s] )
            __a = parent[s]

        max_flow += path_flow
        __a = sink

        while v != source:
            __a = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            __a = parent[v]

    for i in range(len(_UpperCAmelCase ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
60
1
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def __snake_case ( _UpperCAmelCase ):
    __a = {}

    __a = job['''started_at''']
    __a = job['''completed_at''']

    __a = date_parser.parse(_UpperCAmelCase )
    __a = date_parser.parse(_UpperCAmelCase )

    __a = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    __a = start
    __a = end
    __a = duration_in_min

    return job_info


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=None ):
    __a = None
    if token is not None:
        __a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}

    __a = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    __a = requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).json()
    __a = {}

    try:
        job_time.update({job['''name''']: extract_time_from_single_job(_UpperCAmelCase ) for job in result['''jobs''']} )
        __a = math.ceil((result['''total_count'''] - 100) / 100 )

        for i in range(_UpperCAmelCase ):
            __a = requests.get(url + f'&page={i + 2}' , headers=_UpperCAmelCase ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(_UpperCAmelCase ) for job in result['''jobs''']} )

        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )

    return {}


if __name__ == "__main__":
    __snake_case :Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    __snake_case :int = parser.parse_args()

    __snake_case :Union[str, Any] = get_job_time(args.workflow_run_id)
    __snake_case :str = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
60
from __future__ import annotations


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    print(f'Vertex\tShortest Distance from vertex {src}' )
    for i, d in enumerate(_UpperCAmelCase ):
        print(f'{i}\t\t{d}' )


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    for j in range(_UpperCAmelCase ):
        __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    __a = [float('''inf''' )] * vertex_count
    __a = 0.0

    for _ in range(vertex_count - 1 ):
        for j in range(_UpperCAmelCase ):
            __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                __a = distance[u] + w

    __a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    __snake_case :Dict = int(input('''Enter number of vertices: ''').strip())
    __snake_case :Any = int(input('''Enter number of edges: ''').strip())

    __snake_case :list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('''Edge ''', i + 1)
        __snake_case ,__snake_case ,__snake_case :int = (
            int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        __snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight}

    __snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip())
    __snake_case :Optional[Any] = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
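# Illustrative, non-interactive usage sketch added for clarity; it is not part
# of the original row. The names `bellman_ford` and `print_distance` are
# assumed from the calls in the __main__ block above.
edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
# Shortest distances from vertex 0: [0.0, 4.0, 1.0]
print(bellman_ford(edges, 3, len(edges), 0))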
60
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :Any = logging.get_logger(__name__) __snake_case :Optional[int] = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = '''pix2struct_text_model''' UpperCamelCase__ : int = ['''past_key_values'''] UpperCamelCase__ : Dict = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=50_244 , __SCREAMING_SNAKE_CASE : List[str]=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : str=2_048 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : str=128 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : str=1.0 , __SCREAMING_SNAKE_CASE : int="gelu_new" , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=True , **__SCREAMING_SNAKE_CASE : str , ): '''simple docstring''' __a = vocab_size __a = hidden_size __a = d_kv __a = d_ff __a = num_layers __a = num_heads __a = relative_attention_num_buckets __a = relative_attention_max_distance __a = dropout_rate __a = layer_norm_epsilon __a = initializer_factor __a = use_cache __a = eos_token_id __a = decoder_start_token_id # for backwards compatibility __a = dense_act_fn super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) @classmethod def _lowerCamelCase ( cls : Dict , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE) __a , __a = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''') == "pix2struct": __a = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.') return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[int] = '''pix2struct_vision_model''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]=768 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Tuple=2_048 , __SCREAMING_SNAKE_CASE : Optional[int]=64 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu_new" , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-6 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Tuple=1E-10 , __SCREAMING_SNAKE_CASE : Optional[Any]=1.0 , __SCREAMING_SNAKE_CASE : Dict=4_096 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Tuple=128 , **__SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) __a = hidden_size __a = patch_embed_hidden_size __a = d_ff __a = dropout_rate __a = num_hidden_layers __a = num_attention_heads __a = initializer_range __a = initializer_factor __a = attention_dropout __a = layer_norm_eps __a = dense_act_fn __a = seq_len __a = relative_attention_num_buckets __a = relative_attention_max_distance __a = d_kv @classmethod def _lowerCamelCase ( cls : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE) __a , __a = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''') == "pix2struct": __a = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.') return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = '''pix2struct''' UpperCamelCase__ : Union[str, Any] = True def __init__( self : str , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[str]=True , **__SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) if text_config is None: __a = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''') if vision_config is None: __a = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''') __a = PixaStructTextConfig(**__SCREAMING_SNAKE_CASE) __a = PixaStructVisionConfig(**__SCREAMING_SNAKE_CASE) __a = self.text_config.decoder_start_token_id __a = self.text_config.pad_token_id __a = self.text_config.eos_token_id __a = initializer_factor __a = initializer_range __a = self.initializer_range __a = self.initializer_range __a = is_vqa @classmethod def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : PixaStructTextConfig , __SCREAMING_SNAKE_CASE : PixaStructVisionConfig , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = copy.deepcopy(self.__dict__) __a = self.text_config.to_dict() __a = self.vision_config.to_dict() __a = self.__class__.model_type return output
60
import os import sys import unittest __snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''') __snake_case :Any = ''' {0} = None ''' __snake_case :Dict = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' __snake_case :str = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''') self.assertIsNone(__SCREAMING_SNAKE_CASE) __a = find_backend(''' if not is_tokenizers_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''') __a = find_backend(''' if not is_tensorflow_text_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''') __a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE) self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE) self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertModel''' , objects['''tf''']) self.assertIn('''FlaxBertModel''' , objects['''flax''']) self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text''']) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers''']) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''') __a = create_dummy_object('''function''' , '''\'torch\'''') self.assertEqual( __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''') __a = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __a = create_dummy_object('''FakeClass''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']}) self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
60
1
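# Hedged, standalone sketch of what a `find_backend`-style helper could do for the
# guard lines exercised in the tests above (my own minimal regex, not the real
# check_dummies code): pull every `is_xxx_available()` name out of the line and join
# compound guards with "_and_".
import re
from typing import Optional

_AVAILABLE_RE = re.compile(r"is_(\w+)_available\(\)")

def find_backend_sketch(line: str) -> Optional[str]:
    if "_available" not in line:
        return None
    backends = _AVAILABLE_RE.findall(line)
    return "_and_".join(backends) if backends else None

assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert find_backend_sketch(
    "    if not (is_sentencepiece_available() and is_tokenizers_available()):"
) == "sentencepiece_and_tokenizers"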
import math def __snake_case ( _UpperCAmelCase = 100 ): __a = sum(i * i for i in range(1 , n + 1 ) ) __a = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'{solution() = }')
60
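# Quick arithmetic check of the sum-square-difference snippet above (standalone, not a
# call into the obfuscated function): for n = 10, (1 + ... + 10)^2 = 55**2 = 3025 and
# 1^2 + ... + 10^2 = 385, so the difference is 2640.
n = 10
square_of_sum = sum(range(1, n + 1)) ** 2
sum_of_squares = sum(i * i for i in range(1, n + 1))
assert square_of_sum - sum_of_squares == 2640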
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __snake_case :str = get_logger() __snake_case :Optional[dict] = None class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' super().__init__(features=__SCREAMING_SNAKE_CASE) import jax from jaxlib.xla_client import Device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): raise ValueError( F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` ' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') __a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default ' F'device: {str(jax.devices()[0])}.') __a = str(jax.devices()[0]) __a = jnp_array_kwargs @staticmethod def _lowerCamelCase ( ): '''simple docstring''' import jax return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()} def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column: if all( isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0) return column def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))): return value elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() __a = {} if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: __a = {'''dtype''': jnp.intaa} else: __a = {'''dtype''': jnp.intaa} elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): __a = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = np.asarray(__SCREAMING_SNAKE_CASE) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a 
global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs}) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array): __a = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)): return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) return self._tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict): '''simple docstring''' return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE) return self.recursive_tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0]) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) __a = self._consolidate(__SCREAMING_SNAKE_CASE) return column def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for column_name in batch: __a = self._consolidate(batch[column_name]) return batch
60
1
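# Standalone sketch of the integer-precision choice mentioned in the formatter above:
# jax defaults to 32-bit values unless x64 mode is enabled, so the target dtype has to
# be picked explicitly (assumes jax is installed; the helper name is illustrative only).
import jax
import jax.numpy as jnp

def default_int_dtype():
    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32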
from __future__ import annotations __snake_case :Any = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] __snake_case :List[str] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def __snake_case ( _UpperCAmelCase ): __a = [] __a = len(_UpperCAmelCase ) for i in range(_UpperCAmelCase ): __a = -1 for j in range(i + 1 , _UpperCAmelCase ): if arr[i] < arr[j]: __a = arr[j] break result.append(_UpperCAmelCase ) return result def __snake_case ( _UpperCAmelCase ): __a = [] for i, outer in enumerate(_UpperCAmelCase ): __a = -1 for inner in arr[i + 1 :]: if outer < inner: __a = inner break result.append(_UpperCAmelCase ) return result def __snake_case ( _UpperCAmelCase ): __a = len(_UpperCAmelCase ) __a = [] __a = [-1] * arr_size for index in reversed(range(_UpperCAmelCase ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: __a = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) __snake_case :Tuple = ( '''from __main__ import arr, next_greatest_element_slow, ''' '''next_greatest_element_fast, next_greatest_element''' ) print( '''next_greatest_element_slow():''', timeit('''next_greatest_element_slow(arr)''', setup=setup), ) print( '''next_greatest_element_fast():''', timeit('''next_greatest_element_fast(arr)''', setup=setup), ) print( ''' next_greatest_element():''', timeit('''next_greatest_element(arr)''', setup=setup), )
60
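# Minimal standalone illustration of the "next greater element" idea implemented above
# (names are mine, not the dataset row's): for each value, find the first larger value
# to its right, or -1 if none exists, using a monotonic stack of pending indices.
def next_greater(values):
    result = [-1] * len(values)
    stack = []  # indices still waiting for a greater element
    for i, v in enumerate(values):
        while stack and values[stack[-1]] < v:
            result[stack.pop()] = v
        stack.append(i)
    return result

assert next_greater([2, 1, 3]) == [3, 3, -1]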
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __snake_case :Tuple = logging.getLogger(__name__) if __name__ == "__main__": __snake_case :Union[str, Any] = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0522, type=int) __snake_case :List[str] = parser.parse_args() logger.info(f'Loading data from {args.data_file}') with open(args.data_file, '''rb''') as fp: __snake_case :Optional[Any] = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') __snake_case :Dict = Counter() for tk_ids in data: counter.update(tk_ids) __snake_case :Optional[Any] = [0] * args.vocab_size for k, v in counter.items(): __snake_case :Any = v logger.info(f'Dump to {args.token_counts_dump}') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
1
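# Tiny standalone sketch of the counting step in the script above (hypothetical toy
# data, not the real binarized dump): occurrences per token id, written to a dense list.
from collections import Counter

data = [[5, 7, 5], [7, 7, 2]]  # toy "sequences of token ids"
vocab_size = 8
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * vocab_size
for k, v in counter.items():
    counts[k] = v
assert counts == [0, 0, 1, 0, 0, 2, 0, 3]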
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __snake_case :Dict = pytest.mark.integration @pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): inspect_dataset(_UpperCAmelCase , _UpperCAmelCase ) __a = path + '''.py''' assert script_name in os.listdir(_UpperCAmelCase ) assert "__pycache__" not in os.listdir(_UpperCAmelCase ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''' , ['''accuracy'''] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): inspect_metric(_UpperCAmelCase , _UpperCAmelCase ) __a = path + '''.py''' assert script_name in os.listdir(_UpperCAmelCase ) assert "__pycache__" not in os.listdir(_UpperCAmelCase ) @pytest.mark.parametrize( '''path, config_name, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = get_dataset_config_info(_UpperCAmelCase , config_name=_UpperCAmelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): with pytest.raises(_UpperCAmelCase ): get_dataset_config_info(_UpperCAmelCase , config_name=_UpperCAmelCase ) @pytest.mark.parametrize( '''path, expected''' , [ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), ('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ] , ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = get_dataset_config_names(_UpperCAmelCase ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''' , [ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ] , ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = get_dataset_infos(_UpperCAmelCase ) assert list(infos.keys() ) == expected_configs __a = expected_configs[0] assert expected_config in infos __a = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = get_dataset_infos(_UpperCAmelCase ) assert expected_config in infos __a = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits 
@pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): with pytest.raises(_UpperCAmelCase ): get_dataset_split_names(_UpperCAmelCase , config_name=_UpperCAmelCase )
60
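# Hedged usage sketch of the inspection helpers exercised by the tests above (network
# access to the Hub is assumed; the expected values simply mirror the parametrized cases):
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")  # expected to contain "plain_text"
splits = get_dataset_split_names("squad", config_name="plain_text")
assert "train" in splits and "validation" in splits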
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case :List[str] = HfApi() __snake_case :str = {} # fmt: off __snake_case :Optional[Any] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __snake_case :Union[str, Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __snake_case :str = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __snake_case :List[Any] = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __snake_case :Any = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __snake_case :List[str] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __snake_case :Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __snake_case :Tuple = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __snake_case :List[Any] = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, 
-1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __snake_case :Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __snake_case :Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __snake_case :List[str] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __snake_case :Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __snake_case :List[str] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __snake_case :Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on __snake_case :List[Any] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith('''CompVis'''): __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case :List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case :Any = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
60
1
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __snake_case ( _UpperCAmelCase ): __a = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def __snake_case ( _UpperCAmelCase ): __a = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') ) return token def __snake_case ( ): __a = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": __a = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": __a = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __a = [2, 2, 20] __a = [3, 12, 16] __a = [192, 768, 1024] __a = CvtForImageClassification(_UpperCAmelCase ) __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) __a = image_size __a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) ) __a = OrderedDict() __a = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __a = list_of_state_dict + cls_token(_UpperCAmelCase ) __a = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase ) __a = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __a = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __snake_case :str = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case :Dict = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
60
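# Standalone toy illustration of how a (new_name, old_name) rename list like the ones
# built above can be applied to a checkpoint (tensor shapes and key names are made up):
from collections import OrderedDict
import torch

rename_pairs = [("cvt.encoder.proj.weight", "stage0.patch_embed.proj.weight")]
original_weights = {"stage0.patch_embed.proj.weight": torch.zeros(4, 3)}

new_state_dict = OrderedDict(
    (new_name, original_weights[old_name]) for new_name, old_name in rename_pairs
)
assert list(new_state_dict) == ["cvt.encoder.proj.weight"]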
from collections.abc import Generator from math import sin def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) != 32: raise ValueError('''Input must be of length 32''' ) __a = b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''08x''' )[-8:] __a = b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def __snake_case ( _UpperCAmelCase ): __a = b'''''' for char in message: bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' ) __a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_UpperCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(_UpperCAmelCase ) , 512 ): __a = bit_string[pos : pos + 512] __a = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''032b''' ) __a = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(_UpperCAmelCase , 2 ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return (a + b) % 2**32 def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __snake_case ( _UpperCAmelCase ): __a = preprocess(_UpperCAmelCase ) __a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __a = 0X67_452_301 __a = 0Xef_cda_b89 __a = 0X98_bad_cfe __a = 0X10_325_476 __a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_UpperCAmelCase ): __a = aa __a = ba __a = ca __a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __a = d ^ (b & (c ^ d)) __a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __a = c ^ (d & (b ^ c)) __a = (5 * i + 1) % 16 elif i <= 47: __a = b ^ c ^ d __a = (3 * i + 5) % 16 else: __a = c ^ (b | not_aa(_UpperCAmelCase )) __a = (7 * i) % 16 __a = (f + a + added_consts[i] + block_words[g]) % 2**32 __a = d __a = c __a = b __a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
60
1
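# Hedged sanity references for the hand-rolled MD5 above, using the standard library
# rather than the obfuscated implementation: the well-known digest of the empty
# message, plus the 32-bit left-rotation primitive the per-round update relies on.
import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"

def left_rotate_32(x: int, shift: int) -> int:
    return ((x << shift) | (x >> (32 - shift))) & 0xFFFFFFFF

assert left_rotate_32(1, 31) == 0x80000000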
def __snake_case ( _UpperCAmelCase ): __a = [0 for i in range(len(_UpperCAmelCase ) )] # initialize interval's left pointer and right pointer __a , __a = 0, 0 for i in range(1 , len(_UpperCAmelCase ) ): # case when current index is inside the interval if i <= right_pointer: __a = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __a = min_edge while go_next(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __a , __a = i, i + z_result[i] - 1 return z_result def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return i + z_result[i] < len(_UpperCAmelCase ) and s[z_result[i]] == s[i + z_result[i]] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __a = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(_UpperCAmelCase ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
60
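# Standalone worked example of the Z-function above (my own minimal re-derivation, not
# a call into the dataset row): z[i] is the length of the longest common prefix of s
# and s[i:], with z[0] left as 0, matching the convention used above.
def z_array(s: str) -> list:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

assert z_array("abacaba") == [0, 0, 1, 0, 3, 0, 1]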
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} __snake_case :List[Any] = '''zero2''' __snake_case :Optional[Any] = '''zero3''' __snake_case :str = [ZEROa, ZEROa] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a = parameterized.to_safe_name('''_'''.join(str(_UpperCAmelCase ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test __snake_case :List[Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _A ( __UpperCAmelCase ): @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' pass def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE 
: bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = models[model] __a = self.run_trainer( stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) self.do_checks(__SCREAMING_SNAKE_CASE) return output_dir def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE) __a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(['''--fp16''']) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __a = self.get_launcher(__SCREAMING_SNAKE_CASE) __a = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env()) return output_dir def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False): '''simple docstring''' __a = min(2 , get_gpu_count()) if distributed else 1 return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
60
1
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class _A : UpperCamelCase__ : str UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a , __a , __a = _str_to_version_tuple(self.version_str) def __repr__( self : Tuple): '''simple docstring''' return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.major, self.minor, self.patch def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return Version(__SCREAMING_SNAKE_CASE) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return other raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.') def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' try: __a = self._validate_operand(__SCREAMING_SNAKE_CASE) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self._validate_operand(__SCREAMING_SNAKE_CASE) return self.tuple < other.tuple def __hash__( self : Optional[Any]): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple)) @classmethod def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _lowerCamelCase ( self : int): '''simple docstring''' return self.version_str def __snake_case ( _UpperCAmelCase ): __a = _VERSION_REG.match(_UpperCAmelCase ) if not res: raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def __snake_case ( _UpperCAmelCase ): return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
60
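# Hedged standalone sketch of the version handling above: parse "x.y.z" into a tuple of
# ints so versions compare numerically rather than lexicographically.
import re

_VERSION_RE = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def version_tuple(version_str: str):
    m = _VERSION_RE.match(version_str)
    if not m:
        raise ValueError(f"Invalid version {version_str!r}; expected x.y.z")
    return tuple(int(m.group(g)) for g in ("major", "minor", "patch"))

assert version_tuple("1.10.2") > version_tuple("1.9.9")  # numeric, not string, ordering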
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = f'Expected string as input, found {type(_UpperCAmelCase )}' raise ValueError(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = f'Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}' raise ValueError(_UpperCAmelCase ) __a = input_str.split('''_''' ) __a = 0 if use_pascal else 1 __a = words[start_index:] __a = [word[0].upper() + word[1:] for word in words_to_capitalize] __a = '''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
60
1
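# Standalone illustration of the casing conversion above (the function name is mine):
# split on underscores, capitalize every word after the start index, and keep the first
# word lowercase unless PascalCase is requested.
def snake_to_camel(s: str, use_pascal: bool = False) -> str:
    words = s.split("_")
    start = 0 if use_pascal else 1
    head = "" if use_pascal else words[0]
    return head + "".join(w[0].upper() + w[1:] for w in words[start:])

assert snake_to_camel("some_random_string") == "someRandomString"
assert snake_to_camel("some_random_string", use_pascal=True) == "SomeRandomString"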
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case :Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :int = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Optional[int] = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __snake_case :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class _A : UpperCamelCase__ : str UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a , __a , __a = _str_to_version_tuple(self.version_str) def __repr__( self : Tuple): '''simple docstring''' return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.major, self.minor, self.patch def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return Version(__SCREAMING_SNAKE_CASE) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return other raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.') def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' try: __a = self._validate_operand(__SCREAMING_SNAKE_CASE) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self._validate_operand(__SCREAMING_SNAKE_CASE) return self.tuple < other.tuple def __hash__( self : Optional[Any]): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple)) @classmethod def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _lowerCamelCase ( self : int): '''simple docstring''' return self.version_str def __snake_case ( _UpperCAmelCase ): __a = _VERSION_REG.match(_UpperCAmelCase ) if not res: raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def __snake_case ( _UpperCAmelCase ): return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
60
1
from typing import Union import fire import torch from tqdm import tqdm def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = "cpu" , _UpperCAmelCase = None ): __a = torch.load(_UpperCAmelCase , map_location=_UpperCAmelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(_UpperCAmelCase , torch.Tensor ): raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' ) __a = v.half() if save_path is None: # overwrite src_path __a = src_path torch.save(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": fire.Fire(convert)
60
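# Tiny standalone sketch of the fp16 conversion above (toy tensors, not a real
# checkpoint): cast every tensor in a state dict to half precision.
import torch

state_dict = {"weight": torch.ones(2, 2)}  # float32 by default
half_state_dict = {k: v.half() for k, v in state_dict.items()}
assert half_state_dict["weight"].dtype == torch.float16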
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata __snake_case :int = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class _A ( tr.AbstractTransform ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "): '''simple docstring''' __a = sentence_delimiter def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return list(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' __a = [] for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE): chars.extend(self.process_string(__SCREAMING_SNAKE_CASE)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1: chars.append(self.sentence_delimiter) return chars __snake_case :Any = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __snake_case :Optional[int] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __snake_case :Optional[int] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' __snake_case :Tuple = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' __snake_case :Tuple = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"] __a = 0 __a = 0 for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
60
1
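# Worked arithmetic for the CER formula quoted above: with S = 2 substitutions,
# D = 1 deletion, I = 1 insertion and N = 20 reference characters,
# CER = (S + D + I) / N = 4 / 20 = 0.2 (the counts here are illustrative only).
S, D, I, N = 2, 1, 1, 20
assert (S + D + I) / N == 0.2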
def __snake_case ( _UpperCAmelCase ): return " ".join( ''''''.join(word[::-1] ) if len(_UpperCAmelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('''Hey wollef sroirraw'''))
60
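# Standalone restatement of the word-reversal rule above (only words longer than four
# characters are reversed), reproducing the doctest-style call in the row:
def reverse_long_words(sentence: str) -> str:
    return " ".join(w[::-1] if len(w) > 4 else w for w in sentence.split())

assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"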
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :List[str] = ['''ViTFeatureExtractor'''] __snake_case :Optional[Any] = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :str = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
1
from ..utils import DummyObject, requires_backends class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = ['''torch'''] def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[int] = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[Any] = ['''torch'''] def __init__( self : List[str] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : str = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = ['''torch'''] def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[Any] = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : str , 
**__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Any = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Any = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Any = ['''torch'''] def __init__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[str] = ['''torch'''] def __init__( self : List[str] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) def __snake_case ( *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) def __snake_case ( 
*_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) def __snake_case ( *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) def __snake_case ( *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) def __snake_case ( *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) def __snake_case ( *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) def __snake_case ( *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(_UpperCAmelCase , ['''torch'''] ) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[int] = ['''torch'''] def __init__( self : Tuple , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[str] = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = ['''torch'''] def __init__( self : List[str] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[str] = ['''torch'''] def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : int): '''simple 
docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = ['''torch'''] def __init__( self : List[str] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : str = ['''torch'''] def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Dict = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : Tuple , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Dict = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' 
requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Tuple = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : str = ['''torch'''] def __init__( self : Optional[int] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Tuple = ['''torch'''] def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : Tuple , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( 
metaclass=__UpperCAmelCase ): UpperCamelCase__ : Any = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[Any] = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[Any] = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[int] = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[str] = ['''torch'''] def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : 
Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Tuple = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[str] = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : str = ['''torch'''] def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Dict = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : int , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' 
requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Any , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : Tuple , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : str = ['''torch'''] def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = ['''torch'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Tuple = ['''torch'''] def __init__( self : Tuple , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : int = ['''torch'''] def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Dict , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( 
metaclass=__UpperCAmelCase ): UpperCamelCase__ : Tuple = ['''torch'''] def __init__( self : Dict , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[Any] = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Tuple = ['''torch'''] def __init__( self : Tuple , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[str] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : List[Any] = ['''torch'''] def __init__( self : Optional[int] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : str , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : int , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = ['''torch'''] def __init__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Tuple , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch''']) class _A ( metaclass=__UpperCAmelCase ): UpperCamelCase__ : Optional[int] = ['''torch'''] def __init__( self : Any , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' requires_backends(self , ['''torch''']) @classmethod def _lowerCamelCase ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : int): 
'''simple docstring''' requires_backends(cls , ['''torch''']) @classmethod def _lowerCamelCase ( cls : List[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' requires_backends(cls , ['''torch'''])
60
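# The block above is the auto-generated "dummy objects" file: every torch-backed class is replaced
# by a stub whose constructor and classmethods call `requires_backends`, so importing transformers
# without PyTorch still works but using a torch model fails with a clear message. A small,
# self-contained sketch of that pattern; `requires_backends`, `DummyObject` and the placeholder
# class below only mirror the originals and are not the actual library code.
import importlib.util


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s), which are not installed.")


class DummyObject(type):
    # Metaclass hook: even classmethod-style access such as `Cls.from_pretrained(...)`
    # goes through the backend check before failing.
    def __getattr__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        raise AttributeError(key)  # reached only when the backend is actually installed


class ViTModelPlaceholder(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)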
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : List[str] = GPTSwaTokenizer UpperCamelCase__ : Dict = False UpperCamelCase__ : int = True UpperCamelCase__ : List[Any] = False def _lowerCamelCase ( self : List[Any]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = '''This is a test''' __a = '''This is a test''' return input_text, output_text def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = '''<s>''' __a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000) def _lowerCamelCase ( self : Dict): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2_000) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = tokenizer.tokenize('''This is a test''') self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842]) __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) self.assertListEqual( __SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']) # fmt: on def _lowerCamelCase ( self : Any): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] __a = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): 
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # Test that decode_fast returns the input text for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) @slow def _lowerCamelCase ( self : Any): '''simple docstring''' __a = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off __a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
60
1
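# The GPT-SW3 test above pins down a SentencePiece tokenizer by asserting exact token and id
# sequences and by round-tripping encode/decode. A stripped-down, self-contained sketch of that
# test shape with a toy whitespace tokenizer (illustrative only, no model file needed):
import unittest


class ToyTokenizer:
    def __init__(self, vocab):
        self.vocab = {tok: i for i, tok in enumerate(vocab)}
        self.inv_vocab = {i: tok for tok, i in self.vocab.items()}

    def tokenize(self, text):
        return text.split()

    def convert_tokens_to_ids(self, tokens):
        return [self.vocab.get(t, 0) for t in tokens]  # id 0 doubles as <unk>

    def decode(self, ids):
        return " ".join(self.inv_vocab.get(i, "<unk>") for i in ids)


class ToyTokenizerTest(unittest.TestCase):
    def test_round_trip(self):
        tok = ToyTokenizer(["<unk>", "This", "is", "a", "test"])
        tokens = tok.tokenize("This is a test")
        ids = tok.convert_tokens_to_ids(tokens)
        self.assertEqual(ids, [1, 2, 3, 4])
        self.assertEqual(tok.decode(ids), "This is a test")


if __name__ == "__main__":
    unittest.main()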
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :Optional[Any] = logging.get_logger(__name__) __snake_case :Any = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[Any] = '''wavlm''' def __init__( self : int , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Optional[int]=768 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : List[Any]=3_072 , __SCREAMING_SNAKE_CASE : Tuple="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-5 , __SCREAMING_SNAKE_CASE : int="group" , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : int=(512, 512, 512, 512, 512, 512, 512) , __SCREAMING_SNAKE_CASE : int=(5, 2, 2, 2, 2, 2, 2) , __SCREAMING_SNAKE_CASE : Any=(10, 3, 3, 3, 3, 2, 2) , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Optional[int]=16 , __SCREAMING_SNAKE_CASE : Dict=320 , __SCREAMING_SNAKE_CASE : str=800 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : str=0.05 , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Tuple=10 , __SCREAMING_SNAKE_CASE : str=320 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=100 , __SCREAMING_SNAKE_CASE : Optional[Any]=256 , __SCREAMING_SNAKE_CASE : List[str]=256 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[str]="mean" , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=256 , __SCREAMING_SNAKE_CASE : Any=(512, 512, 512, 512, 1_500) , __SCREAMING_SNAKE_CASE : List[str]=(5, 3, 3, 1, 1) , __SCREAMING_SNAKE_CASE : List[str]=(1, 2, 3, 1, 1) , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Any=80 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : int , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE) __a = hidden_size __a = feat_extract_norm __a = feat_extract_activation __a = list(__SCREAMING_SNAKE_CASE) __a = list(__SCREAMING_SNAKE_CASE) __a = list(__SCREAMING_SNAKE_CASE) __a = conv_bias __a = num_buckets __a = max_bucket_distance __a = num_conv_pos_embeddings __a = num_conv_pos_embedding_groups __a = len(self.conv_dim) __a = num_hidden_layers __a = intermediate_size __a = hidden_act __a = num_attention_heads __a = hidden_dropout __a = attention_dropout __a = activation_dropout __a = feat_proj_dropout __a = final_dropout __a = layerdrop __a = layer_norm_eps __a = initializer_range __a = 
num_ctc_classes __a = vocab_size __a = do_stable_layer_norm __a = use_weighted_layer_sum __a = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __a = apply_spec_augment __a = mask_time_prob __a = mask_time_length __a = mask_time_min_masks __a = mask_feature_prob __a = mask_feature_length # parameters for pretraining with codevector quantized representations __a = num_codevectors_per_group __a = num_codevector_groups __a = contrastive_logits_temperature __a = num_negatives __a = codevector_dim __a = proj_codevector_dim __a = diversity_loss_weight # ctc loss __a = ctc_loss_reduction __a = ctc_zero_infinity # adapter __a = add_adapter __a = adapter_kernel_size __a = adapter_stride __a = num_adapter_layers __a = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __a = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __a = list(__SCREAMING_SNAKE_CASE) __a = list(__SCREAMING_SNAKE_CASE) __a = list(__SCREAMING_SNAKE_CASE) __a = xvector_output_dim @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
60
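# The `inputs_to_logits_ratio` property at the end of the WavLM config multiplies the convolutional
# strides together, which gives the overall downsampling factor of the feature extractor. Quick
# arithmetic check with the default strides shown above (the 16 kHz sampling rate is an assumption
# typical for speech models, not something the config itself stores):
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)          # default conv_stride from the config above
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
print(inputs_to_logits_ratio)                # 320 raw samples per output frame
print(16_000 / inputs_to_logits_ratio)       # 50.0 frames per second at an assumed 16 kHz input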
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # A square is safe if no queen already sits on the same row, column or diagonal.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    # Place one queen per row, backtracking whenever no safe column remains.
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
60
1
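# Sanity check for the backtracking solver above: the number of distinct n-queens solutions is
# well known for small boards (2, 10, 4, 40 and 92 for n = 4..8). This independent counter uses
# column/diagonal sets instead of a full board, so it can cross-check the implementation above.
def count_n_queens(n: int) -> int:
    count = 0
    cols: set[int] = set()
    diag_down: set[int] = set()  # row - col is constant along one diagonal direction
    diag_up: set[int] = set()    # row + col is constant along the other

    def place(row: int) -> None:
        nonlocal count
        if row == n:
            count += 1
            return
        for col in range(n):
            if col in cols or (row - col) in diag_down or (row + col) in diag_up:
                continue
            cols.add(col)
            diag_down.add(row - col)
            diag_up.add(row + col)
            place(row + 1)
            cols.discard(col)
            diag_down.discard(row - col)
            diag_up.discard(row + col)

    place(0)
    return count


print([count_n_queens(n) for n in range(4, 9)])  # [2, 10, 4, 40, 92]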
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    # Piles compare by their top (last) element so bisect can search them directly.
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
60
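# The key invariant behind the patience sort above: each pile (Stack) stays in descending order,
# and the pile tops stay in ascending order, so `bisect_left` can pick the leftmost pile whose top
# is >= the new element, and the final heap-merge of the reversed piles is already sorted. A
# standalone illustration of that pile-building rule using plain lists:
from bisect import bisect_left
from heapq import merge


def patience_piles(values):
    tops, piles = [], []              # tops[i] mirrors piles[i][-1] so bisect works on ints
    for v in values:
        i = bisect_left(tops, v)      # leftmost pile whose top is >= v
        if i == len(piles):
            piles.append([v])
            tops.append(v)
        else:
            piles[i].append(v)
            tops[i] = v
    return piles


piles = patience_piles([5, 1, 4, 2, 3])
print(piles)                                        # [[5, 1], [4, 2], [3]]
print(list(merge(*(reversed(p) for p in piles))))   # [1, 2, 3, 4, 5]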
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Invert the cipher map to recover the plaintext alphabet.
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
60
1
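# Round-trip sketch for the keyword cipher above, reusing the functions defined in that snippet;
# the keyword "Computer" is just an arbitrary example. Because the cipher map is a permutation of
# the alphabet and non-letters map to themselves, deciphering an enciphered message returns the
# original text in upper case.
cipher_map = create_cipher_map("Computer")
secret = encipher("Attack at dawn", cipher_map)
print(secret)                          # substitution-ciphered text
print(decipher(secret, cipher_map))    # ATTACK AT DAWN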
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _A ( __UpperCAmelCase ): def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : List[Any]="last" , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_lengths __a = use_token_type_ids __a = use_labels __a = gelu_activation __a = sinusoidal_embeddings __a = causal __a = asm __a = n_langs __a = vocab_size __a = n_special __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_labels __a = num_choices __a = summary_type __a = use_proj __a = scope def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __a = random_attention_mask([self.batch_size, self.seq_length]) __a = None if self.use_input_lengths: __a = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) __a = None __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size) __a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __a = ids_tensor([self.batch_size] , 2).float() __a = ids_tensor([self.batch_size] , self.num_choices) __a = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _lowerCamelCase ( self : Dict): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , 
n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' __a = FlaubertModel(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE , lengths=__SCREAMING_SNAKE_CASE , langs=__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , langs=__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , ): '''simple docstring''' __a = FlaubertWithLMHeadModel(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , ): '''simple docstring''' __a = FlaubertForQuestionAnsweringSimple(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' __a = FlaubertForQuestionAnswering(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) __a = model( __SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , 
cls_index=__SCREAMING_SNAKE_CASE , is_impossible=__SCREAMING_SNAKE_CASE , p_mask=__SCREAMING_SNAKE_CASE , ) __a = model( __SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , cls_index=__SCREAMING_SNAKE_CASE , is_impossible=__SCREAMING_SNAKE_CASE , ) ((__a) , ) = result_with_labels.to_tuple() __a = model(__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE) ((__a) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' __a = FlaubertForSequenceClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) __a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , ): '''simple docstring''' __a = self.num_labels __a = FlaubertForTokenClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , ): '''simple docstring''' __a = self.num_choices __a = FlaubertForMultipleChoice(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __a = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __a = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __a = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def 
_lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = config_and_inputs __a = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Any = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase__ : List[Any] = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=False): '''simple docstring''' __a = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE) __a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE) return inputs_dict def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = FlaubertModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , emb_dim=37) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_flaubert_sequence_classif(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__SCREAMING_SNAKE_CASE) @slow def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = FlaubertModel.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertIsNotNone(__SCREAMING_SNAKE_CASE) @slow @require_torch_gpu def _lowerCamelCase ( self : Any): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return __a = True __a = model_class(config=__SCREAMING_SNAKE_CASE) __a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = torch.jit.trace( __SCREAMING_SNAKE_CASE , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu'''))) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , '''traced_model.pt''')) __a = torch.jit.load(os.path.join(__SCREAMING_SNAKE_CASE , '''traced_model.pt''') , map_location=__SCREAMING_SNAKE_CASE) loaded(inputs_dict['''input_ids'''].to(__SCREAMING_SNAKE_CASE) , inputs_dict['''attention_mask'''].to(__SCREAMING_SNAKE_CASE)) @require_torch class _A ( unittest.TestCase ): @slow def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''') __a = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]]) with torch.no_grad(): __a = model(__SCREAMING_SNAKE_CASE)[0] __a = torch.Size((1, 11, 768)) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE) __a = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
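# Illustrative sketch: the GPU test above traces the model with torch.jit and
# round-trips it through save/load. A minimal, self-contained version of the
# same mechanics on a tiny stand-in module (the module, input shape, and
# tolerance below are assumptions, not FlauBERT specifics):
def _jit_round_trip_sketch():
    import os
    import tempfile

    import torch

    class TinyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)

        def forward(self, x):
            return self.linear(x)

    module = TinyModule().eval()
    example = torch.randn(1, 4)
    traced = torch.jit.trace(module, (example,))  # record ops for this input
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "traced_model.pt")
        torch.jit.save(traced, path)
        loaded = torch.jit.load(path, map_location="cpu")
    # the reloaded graph should reproduce the eager output
    assert torch.allclose(module(example), loaded(example), atol=1e-6)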
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __snake_case :List[Any] = None __snake_case :Dict = logging.get_logger(__name__) __snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case :Union[str, Any] = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __snake_case :Optional[Any] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __snake_case :Optional[int] = '''▁''' class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : str = ['''input_ids''', '''attention_mask'''] UpperCamelCase__ : Dict = BarthezTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = vocab_file __a = False if not self.vocab_file else True def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep) * [0] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(__SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE) return (out_vocab_file,)
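# Illustrative sketch: the special-token layout produced by
# `build_inputs_with_special_tokens` above, with made-up token ids
# (cls=0, sep=2 and the content ids are assumptions, for illustration only):
def _special_tokens_layout_sketch():
    cls, sep = [0], [2]
    ids_a, ids_b = [10, 11], [20, 21, 22]
    single = cls + ids_a + sep                    # <s> A </s>
    pair = cls + ids_a + sep + sep + ids_b + sep  # <s> A </s></s> B </s>
    assert single == [0, 10, 11, 2]
    assert pair == [0, 10, 11, 2, 2, 20, 21, 22, 2]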
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class _A ( unittest.TestCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = parent def _lowerCamelCase ( self : Any): '''simple docstring''' return {} def __snake_case ( ): __a = '''<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>''' __a = ''' <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> ''' return [html_string_a, html_string_a] @require_bsa class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = MarkupLMFeatureExtractor if is_bsa_available() else None def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = MarkupLMFeatureExtractionTester(self) @property def _lowerCamelCase ( self : List[Any]): '''simple docstring''' return self.feature_extract_tester.prepare_feat_extract_dict() def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.feature_extraction_class() # Test not batched input __a = get_html_strings()[0] __a = feature_extractor(__SCREAMING_SNAKE_CASE) # fmt: off __a = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] __a = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , __SCREAMING_SNAKE_CASE) self.assertEqual(encoding.xpaths , __SCREAMING_SNAKE_CASE) # Test batched __a = get_html_strings() __a = feature_extractor(__SCREAMING_SNAKE_CASE) # fmt: off __a = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] __a = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes) , 2) self.assertEqual(len(encoding.xpaths) , 2) self.assertEqual(encoding.nodes , __SCREAMING_SNAKE_CASE) self.assertEqual(encoding.xpaths , __SCREAMING_SNAKE_CASE)
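# Illustrative sketch: basic MarkupLMFeatureExtractor usage as exercised by the
# tests above. It needs `beautifulsoup4` installed and returns parallel lists
# of text nodes and xpaths; the HTML string and printed values are assumptions.
def _markuplm_usage_sketch():
    from transformers import MarkupLMFeatureExtractor

    extractor = MarkupLMFeatureExtractor()
    encoding = extractor("<html><body><h1>Hello</h1></body></html>")
    print(encoding.nodes)   # expected along the lines of [['Hello']]
    print(encoding.xpaths)  # expected along the lines of [['/html/body/h1']]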
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def __snake_case ( _UpperCAmelCase ): __a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0] @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(rows * cols * num_images ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) __a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 ) return data @deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = labels_dense.shape[0] __a = numpy.arange(_UpperCAmelCase ) * num_classes __a = numpy.zeros((num_labels, num_classes) ) __a = 1 return labels_one_hot @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(_UpperCAmelCase ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase ) return labels class _A : @deprecated( __SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' __a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda) __a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype) if fake_data: __a = 10_000 __a = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' __a = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __a = images.reshape( images.shape[0] , images.shape[1] * images.shape[2]) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__a = images.astype(numpy.floataa) __a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0) __a = images __a = labels __a = 0 __a = 0 @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self._images @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self._labels @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self._num_examples @property def _lowerCamelCase ( self : str): '''simple docstring''' return self._epochs_completed def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True): '''simple docstring''' if fake_data: __a = [1] * 784 __a = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__SCREAMING_SNAKE_CASE)], [fake_label for _ in range(__SCREAMING_SNAKE_CASE)], ) __a = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perma] __a = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __a = self._num_examples - start __a = self._images[start : self._num_examples] __a = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perm] __a = self.labels[perm] # Start next epoch __a = 0 __a = batch_size - rest_num_examples __a = self._index_in_epoch __a = self._images[start:end] __a = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0), ) else: self._index_in_epoch += batch_size __a = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not gfile.Exists(_UpperCAmelCase ): gfile.MakeDirs(_UpperCAmelCase ) __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not gfile.Exists(_UpperCAmelCase ): urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310 with gfile.GFile(_UpperCAmelCase ) as f: __a = f.size() print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' ) return filepath @deprecated( _UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase ) __a = fake() __a = fake() __a = fake() return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase ) if not source_url: # empty string check __a = DEFAULT_SOURCE_URL __a = '''train-images-idx3-ubyte.gz''' __a = '''train-labels-idx1-ubyte.gz''' __a = '''t10k-images-idx3-ubyte.gz''' __a = '''t10k-labels-idx1-ubyte.gz''' __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as 
f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) if not 0 <= validation_size <= len(_UpperCAmelCase ): __a = ( '''Validation size should be between 0 and ''' f'{len(_UpperCAmelCase )}. Received: {validation_size}.' ) raise ValueError(_UpperCAmelCase ) __a = train_images[:validation_size] __a = train_labels[:validation_size] __a = train_images[validation_size:] __a = train_labels[validation_size:] __a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
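# Illustrative sketch: the dense -> one-hot conversion used above boils down to
# a flat-index assignment into a zero matrix. A self-contained numpy version
# (the label values and num_classes are assumptions):
def _dense_to_one_hot_sketch():
    import numpy

    labels_dense = numpy.array([0, 2, 1])
    num_classes = 3
    index_offset = numpy.arange(labels_dense.shape[0]) * num_classes
    labels_one_hot = numpy.zeros((labels_dense.shape[0], num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    assert (labels_one_hot.argmax(axis=1) == labels_dense).all()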
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _A ( __UpperCAmelCase ): def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes''')) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''neck_hidden_sizes''')) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads''')) class _A : def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=640 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Optional[Any]="silu" , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = last_hidden_size __a = num_attention_heads __a = hidden_act __a = conv_kernel_size __a = output_stride __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = classifier_dropout_prob __a = use_labels __a = is_training __a = num_labels __a = initializer_range __a = scope def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels) __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) __a = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCamelCase ( self : List[Any]): '''simple docstring''' return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = MobileViTModel(config=__SCREAMING_SNAKE_CASE) 
model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' __a = self.num_labels __a = MobileViTForImageClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = self.num_labels __a = MobileViTForSemanticSegmentation(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() __a = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : str = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase__ : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase__ : Tuple = False UpperCamelCase__ : Any = False UpperCamelCase__ : Tuple = False UpperCamelCase__ : Optional[Any] = False def _lowerCamelCase ( self : Any): '''simple docstring''' __a = MobileViTModelTester(self) __a = MobileViTConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''') def _lowerCamelCase ( self : Tuple): '''simple docstring''' pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''') def _lowerCamelCase ( self : str): '''simple docstring''' pass @unittest.skip(reason='''MobileViT does not output attentions''') def _lowerCamelCase ( self : int): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__SCREAMING_SNAKE_CASE) __a = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE) @unittest.skip('''Will be fixed soon 
by reducing the size of the model used for common tests.''') def _lowerCamelCase ( self : int): '''simple docstring''' pass def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' def check_hidden_states_output(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int]): __a = model_class(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) __a = outputs.hidden_states __a = 5 self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __a = 2 for i in range(len(__SCREAMING_SNAKE_CASE)): self.assertListEqual( list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE) @slow def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = MobileViTModel.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertIsNotNone(__SCREAMING_SNAKE_CASE) def __snake_case ( ): __a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : int): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''') if is_vision_available() else None @slow def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''').to(__SCREAMING_SNAKE_CASE) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE) # forward pass with torch.no_grad(): __a = model(**__SCREAMING_SNAKE_CASE) # verify the logits __a = torch.Size((1, 1_000)) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE) __a = torch.tensor([-1.93_64, -1.23_27, -0.46_53]).to(__SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4)) @slow def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''') __a = 
model.to(__SCREAMING_SNAKE_CASE) __a = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''') __a = prepare_img() __a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE) # forward pass with torch.no_grad(): __a = model(**__SCREAMING_SNAKE_CASE) __a = outputs.logits # verify the logits __a = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE) __a = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ] , device=__SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4)) @slow def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''') __a = model.to(__SCREAMING_SNAKE_CASE) __a = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''') __a = prepare_img() __a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE) # forward pass with torch.no_grad(): __a = model(**__SCREAMING_SNAKE_CASE) __a = outputs.logits.detach().cpu() __a = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)]) __a = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE) __a = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE) __a = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE)
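# Illustrative sketch: the hidden-states test above relies on MobileViT halving
# the feature-map resolution at each reported stage until it reaches
# image_size // output_stride. The arithmetic with the tester defaults
# (image_size=32, output_stride=32, five hidden states):
def _output_stride_sketch():
    image_size, output_stride, num_stages = 32, 32, 5
    divisor = 2
    sizes = []
    for _ in range(num_stages):
        sizes.append(image_size // divisor)
        divisor *= 2
    assert sizes == [16, 8, 4, 2, 1]
    assert output_stride == divisor // 2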
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _A ( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ): '''simple docstring''' __a = size if size is not None else {'''height''': 20, '''width''': 20} __a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_center_crop __a = crop_size __a = do_normalize __a = image_mean __a = image_std __a = do_reduce_labels def _lowerCamelCase ( self : str): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(dataset[0]['''file'''] ) __a = Image.open(dataset[1]['''file'''] ) return image, map def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(ds[0]['''file'''] ) __a = Image.open(ds[1]['''file'''] ) __a = Image.open(ds[2]['''file'''] ) __a = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : int): '''simple docstring''' __a = BeitImageProcessingTester(self) @property def _lowerCamelCase ( self : int): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''')) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20}) 
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) __a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) __a = [] for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input __a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test not batched input (PIL images) __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched input (PIL images) __a , __a = prepare_semantic_batch_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels 
should be between 0 and 150 __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 150) __a = True __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255)
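# Illustrative sketch: the 0..150 vs 0..255 ranges asserted above come from the
# ADE20k "reduce labels" convention, where background (0) becomes the ignore
# index 255 and the remaining class ids shift down by one. A numpy sketch of
# those semantics (an assumption about intent, not the processor's exact code):
def _reduce_labels_sketch():
    import numpy as np

    label = np.array([0, 1, 150], dtype=np.int64)
    reduced = label.copy()
    reduced[reduced == 0] = 255    # background -> ignore index
    reduced = reduced - 1          # shift remaining class ids down
    reduced[reduced == 254] = 255  # keep the ignore index pinned at 255
    assert reduced.tolist() == [255, 0, 149]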
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _A : UpperCamelCase__ : Union[str, Any] = None def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.feature_extraction_class(**self.feat_extract_dict) __a = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: __a = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''') feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE) __a = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict()) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: __a = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE)[0] check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE) __a = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict()) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.feature_extraction_class() self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
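# Illustrative sketch: the JSON round trip checked by the mixin above, run
# against a concrete extractor. Using Wav2Vec2FeatureExtractor (constructible
# from defaults, no weights needed) is an assumption for illustration:
def _json_round_trip_sketch():
    import os
    import tempfile

    from transformers import Wav2Vec2FeatureExtractor

    first = Wav2Vec2FeatureExtractor()
    with tempfile.TemporaryDirectory() as tmpdirname:
        path = os.path.join(tmpdirname, "feat_extract.json")
        first.to_json_file(path)
        second = Wav2Vec2FeatureExtractor.from_json_file(path)
    assert first.to_dict() == second.to_dict()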
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class _A ( __UpperCAmelCase ): def _lowerCamelCase ( self : int): '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']} return Dataset.from_dict(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = self._create_example_records() __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertListEqual(dset.column_names , ['''col_1''', '''col_2''']) for i, r in enumerate(__SCREAMING_SNAKE_CASE): self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i]) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self._create_example_records() __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]}) self.assertEqual(dset.info , dset_from_dict.info) def _lowerCamelCase ( self : int): # checks what happens with missing columns '''simple docstring''' __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}] __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertDictEqual(dset[0] , {'''col_1''': 1}) self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record '''simple docstring''' __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}] __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64'''))) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = Dataset.from_list([]) self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0) self.assertListEqual(dset.column_names , [])
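# Illustrative sketch: the first-record-defines-columns behaviour tested above.
# Keys absent from the first record are dropped and missing values come back as
# None (requires the `datasets` library):
def _from_list_sketch():
    from datasets import Dataset

    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    assert dset.column_names == ["col_1"]
    assert dset[1] == {"col_1": None}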
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __snake_case :Optional[Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class _A ( __UpperCAmelCase ): def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=1): '''simple docstring''' __a = tokenizer __a = dataset __a = len(__SCREAMING_SNAKE_CASE) if n_tasks is None else n_tasks __a = n_copies def __iter__( self : int): '''simple docstring''' __a = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) __a = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class _A ( __UpperCAmelCase ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = start_length __a = eof_strings __a = tokenizer def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) __a = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(__SCREAMING_SNAKE_CASE) def __snake_case ( _UpperCAmelCase ): __a = re.split('''(%s)''' % '''|'''.join(_UpperCAmelCase ) , _UpperCAmelCase ) # last string should be "" return "".join(string_list[:-2] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=20 , **_UpperCAmelCase ): __a = defaultdict(_UpperCAmelCase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_UpperCAmelCase ) ): with torch.no_grad(): __a = batch['''ids'''].shape[-1] __a = accelerator.unwrap_model(_UpperCAmelCase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_UpperCAmelCase , **_UpperCAmelCase ) # each task is generated batch_size times __a = batch['''task_id'''].repeat(_UpperCAmelCase ) __a = accelerator.pad_across_processes( _UpperCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id ) __a , __a = accelerator.gather((generated_tokens, generated_tasks) ) __a = generated_tokens.cpu().numpy() __a = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_UpperCAmelCase , _UpperCAmelCase ): gen_token_dict[task].append(_UpperCAmelCase ) __a = [[] for _ in range(_UpperCAmelCase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: __a = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase 
) code_gens[task].append(remove_last_block(_UpperCAmelCase ) ) return code_gens def __snake_case ( ): # Setup configuration __a = HfArgumentParser(_UpperCAmelCase ) __a = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric __a = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing __a = '''false''' if args.num_workers is None: __a = multiprocessing.cpu_count() # Use dataset load to feed to accelerate __a = Accelerator() set_seed(args.seed , device_specific=_UpperCAmelCase ) # Load model and tokenizer __a = AutoTokenizer.from_pretrained(args.model_ckpt ) __a = tokenizer.eos_token __a = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings __a = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _UpperCAmelCase , _UpperCAmelCase )] ), } # Load evaluation dataset and metric __a = load_dataset('''openai_humaneval''' ) __a = load_metric('''code_eval''' ) __a = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) __a = args.n_samples // args.batch_size __a = TokenizedDataset(_UpperCAmelCase , human_eval['''test'''] , n_copies=_UpperCAmelCase , n_tasks=_UpperCAmelCase ) # do not confuse args.batch_size, which is actually the num_return_sequences __a = DataLoader(_UpperCAmelCase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: __a = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception __a , __a = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase ) __a = complete_code( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , n_tasks=_UpperCAmelCase , batch_size=args.batch_size , **_UpperCAmelCase , ) if accelerator.is_main_process: __a = [] for task in tqdm(range(_UpperCAmelCase ) ): __a = human_eval['''test'''][task]['''test'''] __a = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric __a , __a = code_eval_metric.compute( references=_UpperCAmelCase , predictions=_UpperCAmelCase , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_UpperCAmelCase , _UpperCAmelCase ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
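# Illustrative sketch: the stop-string truncation performed by the first helper
# above. The EOF markers contain no regex metacharacters, so they can be joined
# straight into an alternation; the capturing group keeps each matched marker,
# and dropping the last two pieces removes the final marker plus everything
# after it. The sample generation string is an assumption:
def _truncation_sketch():
    import re

    eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
    generation = "def add(a, b):\n    return a + b\n\nprint(add(1, 2))"
    pieces = re.split("(%s)" % "|".join(eof_strings), generation)
    assert "".join(pieces[:-2]) == "def add(a, b):\n    return a + b\n"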
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __snake_case ( _UpperCAmelCase ): __a = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def __snake_case ( _UpperCAmelCase ): __a = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') ) return token def __snake_case ( ): __a = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": __a = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": __a = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __a = [2, 2, 20] __a = [3, 12, 16] __a = [192, 768, 1024] __a = CvtForImageClassification(_UpperCAmelCase ) __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) __a = image_size __a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) ) __a = OrderedDict() __a = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __a = list_of_state_dict + cls_token(_UpperCAmelCase ) __a = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase ) __a = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __a = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __snake_case :str = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case :Dict = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
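# --- Added usage note (not part of the original script) ---
# A typical invocation of the converter above; the script filename and the
# checkpoint/output paths are placeholders, only the flags come from the argparse
# block defined above:
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-converted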
# limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( '''pipelines_utils''', '''0.22.0''', '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''', standard_warn=False, stacklevel=3, )
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( _UpperCAmelCase ): __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0 __a = image[None].transpose(0 , 3 , 1 , 2 ) __a = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class _A ( __UpperCAmelCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE) @torch.no_grad() def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = 1 elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): __a = image.shape[0] else: raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}') if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = preprocess(__SCREAMING_SNAKE_CASE) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters()).dtype __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE) __a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE) # set timesteps and move to the correct device self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(__SCREAMING_SNAKE_CASE): # concat latents and low resolution image in the channel dimension. 
__a = torch.cat([latents, image] , dim=1) __a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # predict the noise residual __a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample __a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
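# --- Added usage sketch (not part of the original pipeline file) ---
# How a pipeline like the one above is typically driven end to end. Shown as
# comments because it downloads weights, and both the public class name
# (LDMSuperResolutionPipeline) and the checkpoint id are assumptions rather than
# something stated in this file.
#
# import torch
# from PIL import Image
# from diffusers import LDMSuperResolutionPipeline
#
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# low_res = Image.open("input.png").convert("RGB").resize((128, 128))
# upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
# upscaled.save("upscaled.png")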
import os from pathlib import Path def __snake_case ( ): from torch.utils.cpp_extension import load __a = Path(_UpperCAmelCase ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr''' __a = [ root / filename for filename in [ '''vision.cpp''', os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ), os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ), ] ] load( '''MultiScaleDeformableAttention''' , _UpperCAmelCase , with_cuda=_UpperCAmelCase , extra_include_paths=[str(_UpperCAmelCase )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[ '''-DCUDA_HAS_FP16=1''', '''-D__CUDA_NO_HALF_OPERATORS__''', '''-D__CUDA_NO_HALF_CONVERSIONS__''', '''-D__CUDA_NO_HALF2_OPERATORS__''', ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
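# --- Added usage note (not part of the original module) ---
# The loader above JIT-compiles the listed C++/CUDA sources with
# torch.utils.cpp_extension.load on first use, which needs a C++ compiler and a
# CUDA toolkit matching the installed torch build; the compiled extension is then
# returned as the MSDA module imported at the end. The call below is a comment
# because the public name of the loader function is an assumption:
#
# MSDA = load_cuda_kernels()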
from __future__ import annotations from random import random from typing import Generic, TypeVar __snake_case :Any = TypeVar('''KT''') __snake_case :List[str] = TypeVar('''VT''') class _A ( Generic[KT, VT] ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None): '''simple docstring''' __a = key __a = value __a = [] def __repr__( self : Dict): '''simple docstring''' return F'Node({self.key}: {self.value})' @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return len(self.forward) class _A ( Generic[KT, VT] ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16): '''simple docstring''' __a = Node[KT, VT]() __a = 0 __a = p __a = max_level def __str__( self : Union[str, Any]): '''simple docstring''' __a = list(self) if len(__SCREAMING_SNAKE_CASE) == 0: return F'SkipList(level={self.level})' __a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4) __a = max(__SCREAMING_SNAKE_CASE , 4) + 4 __a = self.head __a = [] __a = node.forward.copy() lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) while len(node.forward) != 0: __a = node.forward[0] lines.append( F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) __a = node.forward lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE)) return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE) def __iter__( self : int): '''simple docstring''' __a = self.head while len(node.forward) != 0: yield node.forward[0].key __a = node.forward[0] def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a = [] __a = self.head for i in reversed(range(self.level)): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __a = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__SCREAMING_SNAKE_CASE) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: for i, update_node in enumerate(__SCREAMING_SNAKE_CASE): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __a = node.forward[i] else: __a = update_node.forward[:i] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: __a = value else: __a = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE): update_vector.append(self.head) __a = level __a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for i, update_node in enumerate(update_vector[:level]): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i]) if update_node.level < i + 1: update_node.forward.append(__SCREAMING_SNAKE_CASE) else: __a = new_node def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: return node.value return None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value if len(_UpperCAmelCase ) != 4: print() assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def __snake_case ( ): __a = SkipList() assert skip_list.find('''Some key''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def __snake_case ( ): __a = SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) 
== 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(_UpperCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def __snake_case ( ): def is_sorted(_UpperCAmelCase ): return all(next_item >= item for item, next_item in zip(_UpperCAmelCase , lst[1:] ) ) __a = SkipList() for i in range(10 ): skip_list.insert(_UpperCAmelCase , _UpperCAmelCase ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCAmelCase ) ) def __snake_case ( ): for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __snake_case ( ): __a = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
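# --- Added usage sketch (not part of the original tests) ---
# Minimal interactive use of the structure exercised by the tests above, assuming
# it is exposed as `SkipList` (the name the tests already use). Iteration yields
# keys in sorted order, and find/insert/delete run in expected O(log n) because
# random_level() draws node heights from a geometric distribution with p = 0.5.
#
# sl = SkipList()
# for key, value in [(3, "c"), (1, "a"), (2, "b")]:
#     sl.insert(key, value)
# assert list(sl) == [1, 2, 3]
# assert sl.find(2) == "b"
# sl.delete(2)
# assert sl.find(2) is None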
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case :Any = logging.get_logger(__name__) __snake_case :Dict = '''▁''' __snake_case :Optional[Any] = {'''vocab_file''': '''spiece.model'''} __snake_case :str = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } __snake_case :Optional[int] = { '''google/reformer-crime-and-punishment''': 52_4288, } class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[Any] = VOCAB_FILES_NAMES UpperCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , __SCREAMING_SNAKE_CASE : List[str]=[] , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' __a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __a = vocab_file __a = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(__SCREAMING_SNAKE_CASE) @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.sp_model.get_piece_size() def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Union[str, Any]): '''simple docstring''' __a = self.__dict__.copy() __a = None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): __a = {} __a = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' if index < self.sp_model.get_piece_size(): __a = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE) return token def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = [] __a = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE) + token __a = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE) out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE) return out_string.strip() def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' if not os.path.isdir(__SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path 
({save_directory}) should be a directory') return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE) elif not os.path.isfile(self.vocab_file): with open(__SCREAMING_SNAKE_CASE , '''wb''') as fi: __a = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE) return (out_vocab_file,)
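# --- Added usage sketch (not part of the original module) ---
# Loading the pretrained SentencePiece vocabulary referenced in the maps above.
# Shown as comments because it downloads the model, and the public class name
# (ReformerTokenizer) is an assumption rather than something stated in this file.
#
# from transformers import ReformerTokenizer
#
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tok("Crime and Punishment", return_tensors="pt").input_ids
# print(tok.convert_ids_to_tokens(ids[0].tolist()))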
__snake_case :str = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # Return True if there is node that has not iterated. __a = [False] * len(_UpperCAmelCase ) __a = [s] __a = True while queue: __a = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCAmelCase ) __a = True __a = u return visited[t] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = [-1] * (len(_UpperCAmelCase )) __a = 0 __a = [] __a = [i[:] for i in graph] # Record original cut, copy. while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = float('''Inf''' ) __a = sink while s != source: # Find the minimum value in select path __a = min(_UpperCAmelCase , graph[parent[s]][s] ) __a = parent[s] max_flow += path_flow __a = sink while v != source: __a = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __a = parent[v] for i in range(len(_UpperCAmelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
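# --- Added note (not part of the original module) ---
# The code above is an Edmonds-Karp style computation: BFS finds an augmenting
# path, the bottleneck capacity along it is pushed, and the input `graph` is
# rewritten in place into the residual graph (only the `temp` copy keeps the
# original capacities), so callers who need the capacities afterwards should pass
# a copy. The returned list contains the saturated edges; the flow value itself is
# accumulated in `max_flow` but not returned. For the example graph above the
# maximum flow from 0 to 5 is 23, matching e.g. the cut {(1, 3), (4, 3), (4, 5)}
# with capacities 12 + 7 + 4.
#
# edges = mincut([row[:] for row in test_graph], source=0, sink=5)  # pass a copy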
from math import isqrt def __snake_case ( _UpperCAmelCase ): __a = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , _UpperCAmelCase , _UpperCAmelCase ): __a = False return [i for i in range(2 , _UpperCAmelCase ) if is_prime[i]] def __snake_case ( _UpperCAmelCase = 10**8 ): __a = calculate_prime_numbers(max_number // 2 ) __a = 0 __a = 0 __a = len(_UpperCAmelCase ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f'{solution() = }')
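# --- Added cross-check (not part of the original solution) ---
# A hand-checkable instance of the counting problem above: below 30 there are
# exactly ten numbers with precisely two prime factors counted with multiplicity
# (4, 6, 9, 10, 14, 15, 21, 22, 25, 26), so solution(30) should return 10 under the
# naming that the final print statement assumes. The brute force below is
# independent of the sieve / two-pointer code and only illustrates the expected
# answer for small bounds.
def _count_semiprimes_brute_force(limit: int) -> int:
    def smallest_factor(n: int) -> int:
        d = 2
        while d * d <= n:
            if n % d == 0:
                return d
            d += 1
        return n

    count = 0
    for n in range(4, limit):
        p = smallest_factor(n)
        q = n // p
        # n is a semiprime iff its smallest prime factor p leaves a prime cofactor q
        if p != n and smallest_factor(q) == q:
            count += 1
    return count


assert _count_semiprimes_brute_force(30) == 10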
from __future__ import annotations def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): print(f'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(_UpperCAmelCase ): print(f'{i}\t\t{d}' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for j in range(_UpperCAmelCase ): __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = [float('''inf''' )] * vertex_count __a = 0.0 for _ in range(vertex_count - 1 ): for j in range(_UpperCAmelCase ): __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __a = distance[u] + w __a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() __snake_case :Dict = int(input('''Enter number of vertices: ''').strip()) __snake_case :Any = int(input('''Enter number of edges: ''').strip()) __snake_case :list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) __snake_case ,__snake_case ,__snake_case :int = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) __snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight} __snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip()) __snake_case :Optional[Any] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
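# --- Added worked example (not part of the original script) ---
# The __main__ block above reads the graph interactively; the tiny edge list below
# can be checked by hand instead. With edges 0->1 (weight 4), 0->2 (weight 1) and
# 2->1 (weight 2), the shortest distances from vertex 0 are [0.0, 3.0, 1.0]:
# vertex 1 is cheaper to reach through vertex 2 than directly. The call itself is
# left as a comment because it assumes the solver is exposed as `bellman_ford`,
# the name the __main__ block uses.
_example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
# assert bellman_ford(_example_graph, 3, 3, 0) == [0.0, 3.0, 1.0]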
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __snake_case :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name __snake_case :List[str] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=8 ): __a = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __a = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _A ( __UpperCAmelCase ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : DDPMScheduler , __SCREAMING_SNAKE_CASE : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , movq=__SCREAMING_SNAKE_CASE , ) __a = 2 ** (len(self.movq.config.block_out_channels) - 1) def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' if latents is None: __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}') __a = latents.to(__SCREAMING_SNAKE_CASE) __a = latents * scheduler.init_noise_sigma return latents def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=0): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''') __a = torch.device(F'cuda:{gpu_id}') __a = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : str=0): '''simple docstring''' if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''') __a = torch.device(F'cuda:{gpu_id}') if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=__SCREAMING_SNAKE_CASE) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __a = None for 
cpu_offloaded_model in [self.unet, self.movq]: __a , __a = cpu_offload_with_hook(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE) # We'll offload the last model manually. __a = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' if not hasattr(self.unet , '''_hf_hook'''): return self.device for module in self.unet.modules(): if ( hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''') and hasattr(module._hf_hook , '''execution_device''') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() @replace_example_docstring(__SCREAMING_SNAKE_CASE) def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 100 , __SCREAMING_SNAKE_CASE : float = 4.0 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = self._execution_device __a = guidance_scale > 1.0 if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = torch.cat(__SCREAMING_SNAKE_CASE , dim=0) __a = image_embeds.shape[0] * num_images_per_prompt if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = torch.cat(__SCREAMING_SNAKE_CASE , dim=0) if do_classifier_free_guidance: __a = image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0) __a = negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0) __a = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=__SCREAMING_SNAKE_CASE) self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE) __a = self.scheduler.timesteps __a = self.unet.config.in_channels __a , __a = downscale_height_and_width(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.movq_scale_factor) # create initial latent __a = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE)): # expand the latents if we are doing classifier free guidance __a = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __a = {'''image_embeds''': image_embeds} __a = self.unet( sample=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , added_cond_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: __a , __a = noise_pred.split(latents.shape[1] , dim=1) __a , __a = noise_pred.chunk(2) __a , __a = variance_pred.chunk(2) __a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __a = torch.cat([noise_pred, variance_pred_text] , dim=1) if not ( hasattr(self.scheduler.config , '''variance_type''') and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __a , __a = noise_pred.split(latents.shape[1] , dim=1) # compute the previous noisy sample x_t -> 
x_t-1 __a = self.scheduler.step( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )[0] # post-processing __a = self.movq.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE)['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ["np", "pil"]: __a = image * 0.5 + 0.5 __a = image.clamp(0 , 1) __a = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
import os import sys import unittest __snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''') __snake_case :Any = ''' {0} = None ''' __snake_case :Dict = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' __snake_case :str = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''') self.assertIsNone(__SCREAMING_SNAKE_CASE) __a = find_backend(''' if not is_tokenizers_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''') __a = find_backend(''' if not is_tensorflow_text_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''') __a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE) self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE) self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertModel''' , objects['''tf''']) self.assertIn('''FlaxBertModel''' , objects['''flax''']) self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text''']) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers''']) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''') __a = create_dummy_object('''function''' , '''\'torch\'''') self.assertEqual( __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''') __a = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __a = create_dummy_object('''FakeClass''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']}) self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class _A ( unittest.TestCase ): def _lowerCamelCase ( self : str): '''simple docstring''' __a = tempfile.mkdtemp() __a = BlipImageProcessor() __a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''') __a = BlipaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) processor.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE).tokenizer def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE).image_processor def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] __a = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs] return image_inputs def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) __a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') __a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0) __a = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = self.prepare_image_inputs() __a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''') __a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = '''lower newer''' __a = processor(text=__SCREAMING_SNAKE_CASE) __a = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = '''lower newer''' __a = 
self.prepare_image_inputs() __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE) self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''input_ids''', '''attention_mask''']) # test if it raises when no input is passed with pytest.raises(__SCREAMING_SNAKE_CASE): processor() def _lowerCamelCase ( self : Any): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a = processor.batch_decode(__SCREAMING_SNAKE_CASE) __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = '''lower newer''' __a = self.prepare_image_inputs() __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''input_ids''', '''attention_mask'''])
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __snake_case :str = get_logger() __snake_case :Optional[dict] = None class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' super().__init__(features=__SCREAMING_SNAKE_CASE) import jax from jaxlib.xla_client import Device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): raise ValueError( F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` ' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') __a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default ' F'device: {str(jax.devices()[0])}.') __a = str(jax.devices()[0]) __a = jnp_array_kwargs @staticmethod def _lowerCamelCase ( ): '''simple docstring''' import jax return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()} def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column: if all( isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0) return column def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))): return value elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() __a = {} if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: __a = {'''dtype''': jnp.intaa} else: __a = {'''dtype''': jnp.intaa} elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): __a = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = np.asarray(__SCREAMING_SNAKE_CASE) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a 
global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs}) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array): __a = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)): return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) return self._tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict): '''simple docstring''' return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE) return self.recursive_tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0]) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) __a = self._consolidate(__SCREAMING_SNAKE_CASE) return column def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for column_name in batch: __a = self._consolidate(batch[column_name]) return batch
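# --- Added usage sketch (not part of the original module) ---
# Formatters like the one above are normally reached through the public `datasets`
# API rather than instantiated directly; a minimal sketch, assuming a local
# installation of `datasets` with jax available:
#
# from datasets import Dataset
#
# ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
# ds = ds.with_format("jax")    # rows and columns now come back as jax arrays
# print(ds[0]["x"])             # a jax.Array holding [1, 2]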
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Any = AudioLDMPipeline UpperCamelCase__ : Tuple = TEXT_TO_AUDIO_PARAMS UpperCamelCase__ : str = TEXT_TO_AUDIO_BATCH_PARAMS UpperCamelCase__ : Any = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _lowerCamelCase ( self : Any): '''simple docstring''' torch.manual_seed(0) __a = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__SCREAMING_SNAKE_CASE , ) __a = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) __a = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0) __a = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , ) __a = ClapTextModelWithProjection(__SCREAMING_SNAKE_CASE) __a = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77) __a = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__SCREAMING_SNAKE_CASE , ) __a = SpeechTaHifiGan(__SCREAMING_SNAKE_CASE) __a = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=0): '''simple docstring''' if str(__SCREAMING_SNAKE_CASE).startswith('''mps'''): __a = torch.manual_seed(__SCREAMING_SNAKE_CASE) else: __a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE) __a = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = '''cpu''' # ensure determinism for the device-dependent torch.Generator 
__a = self.get_dummy_components() __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE) __a = output.audios[0] assert audio.ndim == 1 assert len(__SCREAMING_SNAKE_CASE) == 256 __a = audio[:10] __a = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_dummy_components() __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = 3 * [inputs['''prompt''']] # forward __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE) __a = output.audios[0] __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = 3 * [inputs.pop('''prompt''')] __a = audioldm_pipe.tokenizer( __SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) __a = text_inputs['''input_ids'''].to(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.text_encoder( __SCREAMING_SNAKE_CASE , ) __a = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state __a = F.normalize(__SCREAMING_SNAKE_CASE , dim=-1) __a = prompt_embeds # forward __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE) __a = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.get_dummy_components() __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = 3 * ['''this is a negative prompt'''] __a = negative_prompt __a = 3 * [inputs['''prompt''']] # forward __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE) __a = output.audios[0] __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = 3 * [inputs.pop('''prompt''')] __a = [] for p in [prompt, negative_prompt]: __a = audioldm_pipe.tokenizer( __SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) __a = text_inputs['''input_ids'''].to(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.text_encoder( __SCREAMING_SNAKE_CASE , ) __a = text_embeds.text_embeds # additional L_2 normalization over each hidden-state __a = F.normalize(__SCREAMING_SNAKE_CASE , dim=-1) embeds.append(__SCREAMING_SNAKE_CASE) __a , __a = embeds # forward __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE) __a = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = '''cpu''' # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE) __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = '''egg cracking''' __a = 
audioldm_pipe(**__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE) __a = output.audios[0] assert audio.ndim == 1 assert len(__SCREAMING_SNAKE_CASE) == 256 __a = audio[:10] __a = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = '''cpu''' # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE) __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) __a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts __a = 2 __a = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt __a = 2 __a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=2 , num_waveforms_per_prompt=__SCREAMING_SNAKE_CASE).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts __a = 2 __a = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__SCREAMING_SNAKE_CASE).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = '''cpu''' # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.vocoder.config.sampling_rate __a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe(audio_length_in_s=0.0_16 , **__SCREAMING_SNAKE_CASE) __a = output.audios[0] assert audio.ndim == 1 assert len(__SCREAMING_SNAKE_CASE) / vocoder_sampling_rate == 0.0_16 __a = audioldm_pipe(audio_length_in_s=0.0_32 , **__SCREAMING_SNAKE_CASE) __a = output.audios[0] assert audio.ndim == 1 assert len(__SCREAMING_SNAKE_CASE) / vocoder_sampling_rate == 0.0_32 def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_dummy_components() __a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = ['''hey'''] __a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=1) __a = output.audios.shape assert audio_shape == (1, 256) __a = audioldm_pipe.vocoder.config config.model_in_dim *= 2 __a = SpeechTaHifiGan(__SCREAMING_SNAKE_CASE).to(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=1) __a = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE) @unittest.skipIf( 
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _lowerCamelCase ( self : Any): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE) @slow class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[str]): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]="cpu" , __SCREAMING_SNAKE_CASE : List[Any]=torch.floataa , __SCREAMING_SNAKE_CASE : Optional[Any]=0): '''simple docstring''' __a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE) __a = np.random.RandomState(__SCREAMING_SNAKE_CASE).standard_normal((1, 8, 128, 16)) __a = torch.from_numpy(__SCREAMING_SNAKE_CASE).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE) __a = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''') __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = self.get_inputs(__SCREAMING_SNAKE_CASE) __a = 25 __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE).audios[0] assert audio.ndim == 1 assert len(__SCREAMING_SNAKE_CASE) == 81_920 __a = audio[77_230:77_240] __a = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15]) __a = np.abs(expected_slice - audio_slice).max() assert max_diff < 1E-2 def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''') __a = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) __a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE) audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) __a = self.get_inputs(__SCREAMING_SNAKE_CASE) __a = audioldm_pipe(**__SCREAMING_SNAKE_CASE).audios[0] assert audio.ndim == 1 assert len(__SCREAMING_SNAKE_CASE) == 81_920 __a = audio[27_780:27_790] __a = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12]) __a = np.abs(expected_slice - audio_slice).max() assert max_diff < 3E-2
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
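# A hypothetical miniature of the counting step above, without the pickled
# corpus: token ids are tallied with a Counter, then spread into a dense
# vocab-sized list that MLM smoothing can index directly.
from collections import Counter

vocab_size = 6
data = [[1, 2, 2], [2, 5]]  # two toy "sequences" of token ids
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * vocab_size
for k, v in counter.items():
    counts[k] = v
assert counts == [0, 1, 3, 0, 0, 1]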
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Standard logistic function: 1 / (1 + e^-x)
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU, the x * sigmoid(1.702 * x) approximation of GELU
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
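# A hypothetical quick check (the helper name is assumed, not from the file
# above): x * sigmoid(1.702 * x) equals x / (1 + exp(-1.702 * x)) and
# vanishes at the origin.
import numpy as np

def _silu(x: np.ndarray) -> np.ndarray:
    return x / (1 + np.exp(-1.702 * x))

x = np.linspace(-3.0, 3.0, 7)
assert _silu(np.zeros(1))[0] == 0.0
assert np.allclose(_silu(x), x * (1 / (1 + np.exp(-1.702 * x))))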
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case :List[str] = HfApi() __snake_case :str = {} # fmt: off __snake_case :Optional[Any] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __snake_case :Union[str, Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __snake_case :str = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __snake_case :List[Any] = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __snake_case :Any = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __snake_case :List[str] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __snake_case :Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __snake_case :Tuple = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __snake_case :List[Any] = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, 
-1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __snake_case :Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __snake_case :Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __snake_case :List[str] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __snake_case :Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __snake_case :List[str] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __snake_case :Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on __snake_case :List[Any] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith('''CompVis'''): __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case :List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case :Any = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
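# Cross-check for the implementation above (a sketch, assuming the top-level
# digest function keeps the name `md5_me` from the reconstruction): the
# hand-rolled digest should agree byte-for-byte with Python's hashlib.
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")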
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
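# A hypothetical sanity check (names assumed, independent of the file above):
# the composite trapezoidal rule applied to f(x) = x^2 over [0, 1] should
# approach the exact integral 1/3 as the number of steps grows.
def _trapezoid(g, a, b, n):
    h = (b - a) / n
    return h * (g(a) / 2 + sum(g(a + i * h) for i in range(1, n)) + g(b) / 2)

approx = _trapezoid(lambda x: x * x, 0.0, 1.0, 1000)
assert abs(approx - 1 / 3) < 1e-5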
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} __snake_case :List[Any] = '''zero2''' __snake_case :Optional[Any] = '''zero3''' __snake_case :str = [ZEROa, ZEROa] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a = parameterized.to_safe_name('''_'''.join(str(_UpperCAmelCase ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test __snake_case :List[Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _A ( __UpperCAmelCase ): @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' pass def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE 
: bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = models[model] __a = self.run_trainer( stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) self.do_checks(__SCREAMING_SNAKE_CASE) return output_dir def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE) __a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(['''--fp16''']) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __a = self.get_launcher(__SCREAMING_SNAKE_CASE) __a = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env()) return output_dir def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False): '''simple docstring''' __a = min(2 , get_gpu_count()) if distributed else 1 return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , ): __a = {} if train_file is not None: __a = [train_file] if eval_file is not None: __a = [eval_file] if test_file is not None: __a = [test_file] __a = datasets.load_dataset('''csv''' , data_files=_UpperCAmelCase ) __a = list(ds[list(files.keys() )[0]].features.keys() ) __a = features_name.pop(_UpperCAmelCase ) __a = list(set(ds[list(files.keys() )[0]][label_name] ) ) __a = {label: i for i, label in enumerate(_UpperCAmelCase )} __a = tokenizer.model_input_names __a = {} if len(_UpperCAmelCase ) == 1: for k in files.keys(): __a = ds[k].map( lambda _UpperCAmelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) , batched=_UpperCAmelCase , ) elif len(_UpperCAmelCase ) == 2: for k in files.keys(): __a = ds[k].map( lambda _UpperCAmelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , ) , batched=_UpperCAmelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __a = {k: v for k, v in ex.items() if k in input_names} __a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __a = {k: v for k, v in ex.items() if k in input_names} __a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __a = {k: v for k, v in ex.items() if k in input_names} __a = labelaid[ex[label_name]] yield (d, label) __a = ( tf.data.Dataset.from_generator( _UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __a = ( tf.data.Dataset.from_generator( _UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __a = ( tf.data.Dataset.from_generator( _UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __snake_case :Optional[int] = logging.getLogger(__name__) @dataclass class _A : UpperCamelCase__ : int = field(metadata={'''help''': '''Which column contains the label'''} ) UpperCamelCase__ : str = field(default=__UpperCAmelCase 
,metadata={'''help''': '''The path of the training file'''} ) UpperCamelCase__ : Optional[str] = field(default=__UpperCAmelCase ,metadata={'''help''': '''The path of the development file'''} ) UpperCamelCase__ : Optional[str] = field(default=__UpperCAmelCase ,metadata={'''help''': '''The path of the test file'''} ) UpperCamelCase__ : int = field( default=128 ,metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } ,) UpperCamelCase__ : bool = field( default=__UpperCAmelCase ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class _A : UpperCamelCase__ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase__ : Optional[str] = field( default=__UpperCAmelCase ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase__ : Optional[str] = field( default=__UpperCAmelCase ,metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase__ : bool = field(default=__UpperCAmelCase ,metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. UpperCamelCase__ : Optional[str] = field( default=__UpperCAmelCase ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,) def __snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __a , __a , __a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __a , __a , __a , __a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_UpperCAmelCase ) , labelaid=_UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(_UpperCAmelCase ) -> Dict: __a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __a = TFTrainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __a = trainer.evaluate() __a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(_UpperCAmelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(_UpperCAmelCase ) return results if __name__ == "__main__": main()
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
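# Hypothetical usage of the function reconstructed above: lower camelCase
# keeps the first word as-is, while PascalCase capitalizes every word.
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"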
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class _A ( __UpperCAmelCase ): def _lowerCamelCase ( self : int): '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']} return Dataset.from_dict(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = self._create_example_records() __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertListEqual(dset.column_names , ['''col_1''', '''col_2''']) for i, r in enumerate(__SCREAMING_SNAKE_CASE): self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i]) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self._create_example_records() __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]}) self.assertEqual(dset.info , dset_from_dict.info) def _lowerCamelCase ( self : int): # checks what happens with missing columns '''simple docstring''' __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}] __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertDictEqual(dset[0] , {'''col_1''': 1}) self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record '''simple docstring''' __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}] __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64'''))) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = Dataset.from_list([]) self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0) self.assertListEqual(dset.column_names , [])
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class _A : UpperCamelCase__ : str UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a , __a , __a = _str_to_version_tuple(self.version_str) def __repr__( self : Tuple): '''simple docstring''' return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.major, self.minor, self.patch def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return Version(__SCREAMING_SNAKE_CASE) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return other raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.') def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' try: __a = self._validate_operand(__SCREAMING_SNAKE_CASE) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self._validate_operand(__SCREAMING_SNAKE_CASE) return self.tuple < other.tuple def __hash__( self : Optional[Any]): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple)) @classmethod def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _lowerCamelCase ( self : int): '''simple docstring''' return self.version_str def __snake_case ( _UpperCAmelCase ): __a = _VERSION_REG.match(_UpperCAmelCase ) if not res: raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def __snake_case ( _UpperCAmelCase ): return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
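# A minimal self-contained sketch of the same idea as the dataclass above
# (hypothetical names): parse "x.y.z" into an int tuple so versions compare
# numerically rather than lexicographically.
import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def str_to_version_tuple(version_str: str) -> tuple:
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'.")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))

assert str_to_version_tuple("1.10.2") > str_to_version_tuple("1.9.5")
assert "1.10.2" < "1.9.5"  # plain string comparison gets this wrong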
from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :int = logging.get_logger(__name__) __snake_case :int = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _A ( __UpperCAmelCase ): UpperCamelCase__ : List[str] = '''cvt''' def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[7, 3, 3] , __SCREAMING_SNAKE_CASE : int=[4, 2, 2] , __SCREAMING_SNAKE_CASE : Tuple=[2, 1, 1] , __SCREAMING_SNAKE_CASE : str=[64, 192, 384] , __SCREAMING_SNAKE_CASE : Dict=[1, 3, 6] , __SCREAMING_SNAKE_CASE : Optional[Any]=[1, 2, 10] , __SCREAMING_SNAKE_CASE : str=[4.0, 4.0, 4.0] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE : Tuple=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE : Tuple=[0.0, 0.0, 0.1] , __SCREAMING_SNAKE_CASE : Optional[Any]=[True, True, True] , __SCREAMING_SNAKE_CASE : Optional[int]=[False, False, True] , __SCREAMING_SNAKE_CASE : List[str]=["dw_bn", "dw_bn", "dw_bn"] , __SCREAMING_SNAKE_CASE : List[Any]=[3, 3, 3] , __SCREAMING_SNAKE_CASE : Any=[1, 1, 1] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[2, 2, 2] , __SCREAMING_SNAKE_CASE : Optional[Any]=[1, 1, 1] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 1, 1] , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) __a = num_channels __a = patch_sizes __a = patch_stride __a = patch_padding __a = embed_dim __a = num_heads __a = depth __a = mlp_ratio __a = attention_drop_rate __a = drop_rate __a = drop_path_rate __a = qkv_bias __a = cls_token __a = qkv_projection_method __a = kernel_qkv __a = padding_kv __a = stride_kv __a = padding_q __a = stride_q __a = initializer_range __a = layer_norm_eps
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata __snake_case :int = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class _A ( tr.AbstractTransform ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "): '''simple docstring''' __a = sentence_delimiter def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return list(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' __a = [] for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE): chars.extend(self.process_string(__SCREAMING_SNAKE_CASE)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1: chars.append(self.sentence_delimiter) return chars __snake_case :Any = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __snake_case :Optional[int] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __snake_case :Optional[int] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' __snake_case :Tuple = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' __snake_case :Tuple = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"] __a = 0 __a = 0 for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
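# A hypothetical worked example of the CER formula documented above, using a
# plain edit-distance implementation instead of jiwer: CER = (S + D + I) / N.
def edit_distance(ref: str, hyp: str) -> int:
    # Levenshtein distance with a single rolling row of the DP table.
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]

# one substitution against a 5-character reference -> CER = 1 / 5
assert edit_distance("hello", "hallo") / len("hello") == 0.2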
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
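# A hypothetical consistency check (uses the signature reconstructed above):
# at redshift 0 in a flat universe the relative densities sum to one, so the
# Hubble parameter must reduce to the Hubble constant itself.
h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1e-4,
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-6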
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :List[str] = ['''ViTFeatureExtractor'''] __snake_case :Optional[Any] = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :str = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __snake_case :int = TypeVar('''T''') class _A ( Generic[T] ): def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : list[T] , __SCREAMING_SNAKE_CASE : Callable[[T, T], T]): '''simple docstring''' __a = None __a = len(__SCREAMING_SNAKE_CASE) __a = [any_type for _ in range(self.N)] + arr __a = fnc self.build() def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' for p in range(self.N - 1 , 0 , -1): __a = self.fn(self.st[p * 2] , self.st[p * 2 + 1]) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : T): '''simple docstring''' p += self.N __a = v while p > 1: __a = p // 2 __a = self.fn(self.st[p * 2] , self.st[p * 2 + 1]) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int): # noqa: E741 '''simple docstring''' __a , __a = l + self.N, r + self.N __a = None while l <= r: if l % 2 == 1: __a = self.st[l] if res is None else self.fn(__SCREAMING_SNAKE_CASE , self.st[l]) if r % 2 == 0: __a = self.st[r] if res is None else self.fn(__SCREAMING_SNAKE_CASE , self.st[r]) __a , __a = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __snake_case :int = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __snake_case :Tuple = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __snake_case :Optional[int] = SegmentTree(test_array, min) __snake_case :Union[str, Any] = SegmentTree(test_array, max) __snake_case :str = SegmentTree(test_array, lambda a, b: a + b) def __snake_case ( ): for i in range(len(_UpperCAmelCase ) ): for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ): __a = reduce(_UpperCAmelCase , test_array[i : j + 1] ) __a = reduce(_UpperCAmelCase , test_array[i : j + 1] ) __a = reduce(lambda _UpperCAmelCase , _UpperCAmelCase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(_UpperCAmelCase , _UpperCAmelCase ) assert max_range == max_segment_tree.query(_UpperCAmelCase , _UpperCAmelCase ) assert sum_range == sum_segment_tree.query(_UpperCAmelCase , _UpperCAmelCase ) test_all_segments() for index, value in test_updates.items(): __snake_case :int = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
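# A minimal self-contained sketch of the same iterative segment-tree idea
# (hypothetical names, independent of the class above): leaves live at
# indices n..2n-1 and each parent stores fn(left_child, right_child), so the
# root at index 1 aggregates the whole array.
from typing import Callable, List

def build_tree(arr: List[int], fn: Callable[[int, int], int]) -> List[int]:
    n = len(arr)
    st = [0] * n + arr
    for p in range(n - 1, 0, -1):
        st[p] = fn(st[2 * p], st[2 * p + 1])
    return st

data = [5, 2, 8, 1]
st = build_tree(data, min)
assert st[1] == 1  # the root holds the minimum of the whole array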
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : List[str] = GPTSwaTokenizer UpperCamelCase__ : Dict = False UpperCamelCase__ : int = True UpperCamelCase__ : List[Any] = False def _lowerCamelCase ( self : List[Any]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = '''This is a test''' __a = '''This is a test''' return input_text, output_text def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = '''<s>''' __a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000) def _lowerCamelCase ( self : Dict): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2_000) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = tokenizer.tokenize('''This is a test''') self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842]) __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) self.assertListEqual( __SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']) # fmt: on def _lowerCamelCase ( self : Any): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] __a = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): 
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # Test that decode_fast returns the input text for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) @slow def _lowerCamelCase ( self : Any): '''simple docstring''' __a = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off __a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class _A ( unittest.TestCase ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=13 , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : int=99 , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Dict=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , ): '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_attention_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_choices def _lowerCamelCase ( self : int): '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __a = None if self.use_attention_mask: __a = random_attention_mask([self.batch_size, self.seq_length]) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __a = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( __UpperCAmelCase ,unittest.TestCase ): 
UpperCamelCase__ : str = True UpperCamelCase__ : Dict = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, ) if is_flax_available() else () ) def _lowerCamelCase ( self : int): '''simple docstring''' __a = FlaxBertModelTester(self) @slow def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = FlaxBertModel.from_pretrained('''bert-base-cased''') __a = model(np.ones((1, 1))) self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
from __future__ import annotations __snake_case :Optional[Any] = [] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for i in range(len(_UpperCAmelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCAmelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ): if board[i][j] == 1: return False return True def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if row >= len(_UpperCAmelCase ): solution.append(_UpperCAmelCase ) printboard(_UpperCAmelCase ) print() return True for i in range(len(_UpperCAmelCase ) ): if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = 1 solve(_UpperCAmelCase , row + 1 ) __a = 0 return False def __snake_case ( _UpperCAmelCase ): for i in range(len(_UpperCAmelCase ) ): for j in range(len(_UpperCAmelCase ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) __snake_case :Optional[Any] = 8 __snake_case :Tuple = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
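# Note on the solver above: solution.append(...) stores a reference to the
# live board, so once backtracking unwinds every stored entry is the same
# emptied board; only len(solution) stays meaningful. A de-obfuscated sketch
# of the same backtracking scheme that deep-copies each completed board:
from __future__ import annotations


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # Only rows above `row` can hold queens, so check the column and the two
    # upper diagonals.
    n = len(board)
    for i in range(row):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row - 1, -1, -1), range(column - 1, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row - 1, -1, -1), range(column + 1, n)):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int, solutions: list[list[list[int]]]) -> None:
    if row == len(board):
        solutions.append([r[:] for r in board])  # deep copy, not a reference
        return
    for col in range(len(board)):
        if is_safe(board, row, col):
            board[row][col] = 1
            solve(board, row + 1, solutions)
            board[row][col] = 0


if __name__ == "__main__":
    n = 8
    solutions: list[list[list[int]]] = []
    solve([[0] * n for _ in range(n)], 0, solutions)
    print(len(solutions))  # 92 distinct placements for n = 8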
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : List[Any] = IFInpaintingPipeline UpperCamelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} UpperCamelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCamelCase__ : str = PipelineTesterMixin.required_optional_params - {'''latents'''} def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self._get_dummy_components() def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=0): '''simple docstring''' if str(__SCREAMING_SNAKE_CASE).startswith('''mps'''): __a = torch.manual_seed(__SCREAMING_SNAKE_CASE) else: __a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE)).to(__SCREAMING_SNAKE_CASE) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE)).to(__SCREAMING_SNAKE_CASE) __a = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _lowerCamelCase ( self : Tuple): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def _lowerCamelCase ( self : int): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _lowerCamelCase ( self : int): '''simple docstring''' self._test_save_load_local() def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
def __snake_case ( _UpperCAmelCase ): __a = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def __snake_case ( _UpperCAmelCase ): __a = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __a = remove_duplicates(key.upper() ) __a = len(_UpperCAmelCase ) # First fill cipher with key characters __a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(_UpperCAmelCase ) , 26 ): __a = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __a = alphabet[i - offset] __a = char return cipher_alphabet def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() ) def __snake_case ( ): __a = input('''Enter message to encode or decode: ''' ).strip() __a = input('''Enter keyword: ''' ).strip() __a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __a = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __a = create_cipher_map(_UpperCAmelCase ) print(func(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
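# For reference, a minimal clean sketch of the keyword-cipher round trip the
# module above implements. The leftover alphabet is filled with the plain
# "append unused letters in order" variant, which differs slightly from the
# offset-based fill above, so the exact mapping may not match.
from __future__ import annotations

from string import ascii_uppercase


def create_keyword_cipher(key: str) -> dict[str, str]:
    # Deduplicate the keyword, keeping first occurrences only.
    dedup = "".join(dict.fromkeys(ch for ch in key.upper() if ch.isalpha()))
    rest = "".join(ch for ch in ascii_uppercase if ch not in dedup)
    return dict(zip(ascii_uppercase, dedup + rest))


cipher = create_keyword_cipher("HUFFMAN")
encoded = "".join(cipher.get(ch, ch) for ch in "HELLO WORLD")
decoded = "".join({v: k for k, v in cipher.items()}.get(ch, ch) for ch in encoded)
assert decoded == "HELLO WORLD"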
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def __snake_case ( _UpperCAmelCase ): __a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0] @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(rows * cols * num_images ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) __a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 ) return data @deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = labels_dense.shape[0] __a = numpy.arange(_UpperCAmelCase ) * num_classes __a = numpy.zeros((num_labels, num_classes) ) __a = 1 return labels_one_hot @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(_UpperCAmelCase ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase ) return labels class _A : @deprecated( __SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' __a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda) __a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype) if fake_data: __a = 10_000 __a = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' __a = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __a = images.reshape( images.shape[0] , images.shape[1] * images.shape[2]) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__a = images.astype(numpy.floataa) __a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0) __a = images __a = labels __a = 0 __a = 0 @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self._images @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self._labels @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self._num_examples @property def _lowerCamelCase ( self : str): '''simple docstring''' return self._epochs_completed def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True): '''simple docstring''' if fake_data: __a = [1] * 784 __a = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__SCREAMING_SNAKE_CASE)], [fake_label for _ in range(__SCREAMING_SNAKE_CASE)], ) __a = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perma] __a = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __a = self._num_examples - start __a = self._images[start : self._num_examples] __a = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perm] __a = self.labels[perm] # Start next epoch __a = 0 __a = batch_size - rest_num_examples __a = self._index_in_epoch __a = self._images[start:end] __a = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0), ) else: self._index_in_epoch += batch_size __a = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not gfile.Exists(_UpperCAmelCase ): gfile.MakeDirs(_UpperCAmelCase ) __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not gfile.Exists(_UpperCAmelCase ): urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310 with gfile.GFile(_UpperCAmelCase ) as f: __a = f.size() print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' ) return filepath @deprecated( _UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase ) __a = fake() __a = fake() __a = fake() return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase ) if not source_url: # empty string check __a = DEFAULT_SOURCE_URL __a = '''train-images-idx3-ubyte.gz''' __a = '''train-labels-idx1-ubyte.gz''' __a = '''t10k-images-idx3-ubyte.gz''' __a = '''t10k-labels-idx1-ubyte.gz''' __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as 
f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) if not 0 <= validation_size <= len(_UpperCAmelCase ): __a = ( '''Validation size should be between 0 and ''' f'{len(_UpperCAmelCase )}. Received: {validation_size}.' ) raise ValueError(_UpperCAmelCase ) __a = train_images[:validation_size] __a = train_labels[:validation_size] __a = train_images[validation_size:] __a = train_labels[validation_size:] __a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
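# A small sketch of the IDX header parsing the reader above performs; the
# magic numbers (2051 for images, 2049 for labels) come from the checks in
# the extractors. Assumes a local gzipped IDX file path.
from __future__ import annotations

import gzip
import struct


def read_idx_header(path: str) -> tuple[int, ...]:
    with gzip.open(path, "rb") as f:
        magic = struct.unpack(">I", f.read(4))[0]  # 32-bit big-endian int
        if magic == 2051:  # image file: num_images, rows, cols follow
            return (magic,) + struct.unpack(">III", f.read(12))
        if magic == 2049:  # label file: num_items follows
            return (magic,) + struct.unpack(">I", f.read(4))
        raise ValueError(f"Invalid magic number {magic} in {path}")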
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __snake_case :List[Any] = None __snake_case :Dict = logging.get_logger(__name__) __snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case :Union[str, Any] = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __snake_case :Optional[Any] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __snake_case :Optional[int] = '''▁''' class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : str = ['''input_ids''', '''attention_mask'''] UpperCamelCase__ : Dict = BarthezTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = vocab_file __a = False if not self.vocab_file else True def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep) * [0] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(__SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE) return (out_vocab_file,)
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __snake_case ( _UpperCAmelCase ): __a , __a = analyze_text(_UpperCAmelCase ) __a = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. __a = sum(single_char_strings.values() ) # one length string __a = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __a = single_char_strings[ch] __a = my_str / all_sum my_fir_sum += prob * math.loga(_UpperCAmelCase ) # entropy formula. # print entropy print(f'{round(-1 * my_fir_sum ):.1f}' ) # two len string __a = sum(two_char_strings.values() ) __a = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: __a = cha + cha if sequence in two_char_strings: __a = two_char_strings[sequence] __a = int(_UpperCAmelCase ) / all_sum my_sec_sum += prob * math.loga(_UpperCAmelCase ) # print second entropy print(f'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def __snake_case ( _UpperCAmelCase ): __a = Counter() # type: ignore __a = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_UpperCAmelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __snake_case ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
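# Note on the pair loop above: the inner loop reuses the variable name `cha`
# (for cha in my_alphas: for cha in my_alphas), so only doubled characters
# would ever be formed. A clean sketch of the intended first- and
# second-order Shannon entropy estimate:
import math
from collections import Counter


def shannon_entropy(counts: Counter) -> float:
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())


text = "abracadabra alakazam"
single = Counter(text)
pairs = Counter(text[i : i + 2] for i in range(len(text) - 1))  # adjacent-character bigrams
print(f"H1 = {shannon_entropy(single):.2f} bits per character")
print(f"H2 = {shannon_entropy(pairs):.2f} bits per pair")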
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _A ( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ): '''simple docstring''' __a = size if size is not None else {'''height''': 20, '''width''': 20} __a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_center_crop __a = crop_size __a = do_normalize __a = image_mean __a = image_std __a = do_reduce_labels def _lowerCamelCase ( self : str): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(dataset[0]['''file'''] ) __a = Image.open(dataset[1]['''file'''] ) return image, map def __snake_case ( ): __a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __a = Image.open(ds[0]['''file'''] ) __a = Image.open(ds[1]['''file'''] ) __a = Image.open(ds[2]['''file'''] ) __a = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : int): '''simple docstring''' __a = BeitImageProcessingTester(self) @property def _lowerCamelCase ( self : int): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''')) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20}) 
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) __a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) __a = [] for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input __a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test not batched input (PIL images) __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) # Test batched input (PIL images) __a , __a = prepare_semantic_batch_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long) self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels 
should be between 0 and 150 __a , __a = prepare_semantic_single_inputs() __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 150) __a = True __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''') self.assertTrue(encoding['''labels'''].min().item() >= 0) self.assertTrue(encoding['''labels'''].max().item() <= 255)
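# The final `<= 255` assertions reflect what do_reduce_labels is expected to
# do for ADE20k-style maps: remap background class 0 to the ignore index 255
# and shift the remaining labels down by one. A numpy illustration of that
# convention (an assumption about the semantics, not the processor's own code):
import numpy as np

labels = np.array([[0, 1, 2], [150, 3, 0]])
reduced = labels - 1
reduced[labels == 0] = 255
print(reduced)  # background pixels become 255; class 1 becomes 0, and so on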
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class _A ( unittest.TestCase ): def _lowerCamelCase ( self : Dict): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=__SCREAMING_SNAKE_CASE , ) assert hasattr(self , '''env''') def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' __a = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings __a = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__SCREAMING_SNAKE_CASE , instance_count=__SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=__SCREAMING_SNAKE_CASE , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__SCREAMING_SNAKE_CASE , py_version='''py36''' , ) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' TrainingJobAnalytics(__SCREAMING_SNAKE_CASE).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv') @parameterized.expand([(2,)]) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = self.create_estimator(__SCREAMING_SNAKE_CASE) # run training estimator.fit() # result dataframe __a = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis __a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value''']) __a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value''']) # get train time from SageMaker job, this includes starting, preprocessing, stopping __a = ( Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 999_999) ) # assert kpis assert train_runtime <= 
self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy) assert all(t <= self.results['''eval_loss'''] for t in eval_loss) # dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json' , '''w''') as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __SCREAMING_SNAKE_CASE)
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class _A ( __UpperCAmelCase ): def _lowerCamelCase ( self : int): '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']} return Dataset.from_dict(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = self._create_example_records() __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertListEqual(dset.column_names , ['''col_1''', '''col_2''']) for i, r in enumerate(__SCREAMING_SNAKE_CASE): self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i]) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self._create_example_records() __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]}) self.assertEqual(dset.info , dset_from_dict.info) def _lowerCamelCase ( self : int): # checks what happens with missing columns '''simple docstring''' __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}] __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertDictEqual(dset[0] , {'''col_1''': 1}) self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record '''simple docstring''' __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}] __a = Dataset.from_list(__SCREAMING_SNAKE_CASE) self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64'''))) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = Dataset.from_list([]) self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0) self.assertListEqual(dset.column_names , [])
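# The missing-column test above relies on Dataset.from_list inferring the
# schema from the first record. A quick demonstration, assuming the
# `datasets` package is installed:
from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset[0])  # {'col_1': 1}
print(dset[1])  # {'col_1': None} since the schema came from the first record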
from __future__ import annotations def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) == 0: return [] __a , __a = min(_UpperCAmelCase ), max(_UpperCAmelCase ) __a = int(max_value - min_value ) + 1 __a = [[] for _ in range(_UpperCAmelCase )] for i in my_list: buckets[int(i - min_value )].append(_UpperCAmelCase ) return [v for bucket in buckets for v in sorted(_UpperCAmelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
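# A de-obfuscated sketch of the same bucket sort: one bucket per integer
# offset from the minimum value, each bucket sorted, then concatenated.
from __future__ import annotations


def bucket_sort(values: list[float]) -> list[float]:
    if not values:
        return []
    low, high = min(values), max(values)
    buckets: list[list[float]] = [[] for _ in range(int(high - low) + 1)]
    for v in values:
        buckets[int(v - low)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]


assert bucket_sort([0.5, -1.2, 3.3, 0.1]) == [-1.2, 0.1, 0.5, 3.3]
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]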
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __snake_case ( _UpperCAmelCase ): __a = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def __snake_case ( _UpperCAmelCase ): __a = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') ) return token def __snake_case ( ): __a = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": __a = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": __a = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __a = [2, 2, 20] __a = [3, 12, 16] __a = [192, 768, 1024] __a = CvtForImageClassification(_UpperCAmelCase ) __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) __a = image_size __a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) ) __a = OrderedDict() __a = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __a = list_of_state_dict + cls_token(_UpperCAmelCase ) __a = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase ) __a = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __a = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __snake_case :str = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __snake_case :Dict = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
60
1
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def __snake_case ( ): __a = ArgumentParser( description=( '''PyTorch TPU distributed training launch ''' '''helper utility that will spawn up ''' '''multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=_UpperCAmelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=_UpperCAmelCase , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=_UpperCAmelCase ) return parser.parse_args() def __snake_case ( ): __a = parse_args() # Import training_script as a module. __a = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __a = script_fpath.stem __a = importlib.import_module(_UpperCAmelCase ) # Patch sys.argv __a = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
60
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( _UpperCAmelCase ): __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0 __a = image[None].transpose(0 , 3 , 1 , 2 ) __a = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class _A ( __UpperCAmelCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE) @torch.no_grad() def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = 1 elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): __a = image.shape[0] else: raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}') if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = preprocess(__SCREAMING_SNAKE_CASE) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters()).dtype __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE) __a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE) # set timesteps and move to the correct device self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(__SCREAMING_SNAKE_CASE): # concat latents and low resolution image in the channel dimension. 
__a = torch.cat([latents, image] , dim=1) __a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # predict the noise residual __a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample __a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
60
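A rough usage sketch for the pipeline above. The class is unnamed in the snippet; assuming it corresponds to the LDM super-resolution pipeline shipped with diffusers, and assuming the checkpoint id below exists on the Hub (both are assumptions, not stated in the snippet), loading and running it would look roughly like this:

# Hedged sketch: the class name and checkpoint id are assumptions, not taken from the snippet.
import PIL.Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = PIL.Image.open("low_res.png").convert("RGB")  # any small RGB image
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")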
1
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata __snake_case :int = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class _A ( tr.AbstractTransform ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "): '''simple docstring''' __a = sentence_delimiter def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return list(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' __a = [] for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE): chars.extend(self.process_string(__SCREAMING_SNAKE_CASE)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1: chars.append(self.sentence_delimiter) return chars __snake_case :Any = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __snake_case :Optional[int] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __snake_case :Optional[int] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' __snake_case :Tuple = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' __snake_case :Tuple = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"] __a = 0 __a = 0 for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
60
from __future__ import annotations from random import random from typing import Generic, TypeVar __snake_case :Any = TypeVar('''KT''') __snake_case :List[str] = TypeVar('''VT''') class _A ( Generic[KT, VT] ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None): '''simple docstring''' __a = key __a = value __a = [] def __repr__( self : Dict): '''simple docstring''' return F'Node({self.key}: {self.value})' @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' return len(self.forward) class _A ( Generic[KT, VT] ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16): '''simple docstring''' __a = Node[KT, VT]() __a = 0 __a = p __a = max_level def __str__( self : Union[str, Any]): '''simple docstring''' __a = list(self) if len(__SCREAMING_SNAKE_CASE) == 0: return F'SkipList(level={self.level})' __a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4) __a = max(__SCREAMING_SNAKE_CASE , 4) + 4 __a = self.head __a = [] __a = node.forward.copy() lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) while len(node.forward) != 0: __a = node.forward[0] lines.append( F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards)) lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE)) __a = node.forward lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE)) return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE) def __iter__( self : int): '''simple docstring''' __a = self.head while len(node.forward) != 0: yield node.forward[0].key __a = node.forward[0] def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a = [] __a = self.head for i in reversed(range(self.level)): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __a = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__SCREAMING_SNAKE_CASE) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: for i, update_node in enumerate(__SCREAMING_SNAKE_CASE): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __a = node.forward[i] else: __a = update_node.forward[:i] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: __a = value else: __a = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE): update_vector.append(self.head) __a = level __a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for i, update_node in enumerate(update_vector[:level]): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i]) if update_node.level < i + 1: update_node.forward.append(__SCREAMING_SNAKE_CASE) else: __a = new_node def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT): '''simple docstring''' __a , __a = self._locate_node(__SCREAMING_SNAKE_CASE) if node is not None: return node.value return None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value if len(_UpperCAmelCase ) != 4: print() assert len(_UpperCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def __snake_case ( ): __a = SkipList() assert skip_list.find('''Some key''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def __snake_case ( ): __a = SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) 
== 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def __snake_case ( ): __a = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(_UpperCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def __snake_case ( ): def is_sorted(_UpperCAmelCase ): return all(next_item >= item for item, next_item in zip(_UpperCAmelCase , lst[1:] ) ) __a = SkipList() for i in range(10 ): skip_list.insert(_UpperCAmelCase , _UpperCAmelCase ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCAmelCase ) ) def __snake_case ( ): for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __snake_case ( ): __a = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
60
1
from math import factorial __snake_case :List[Any] = {str(d): factorial(d) for d in range(10)} def __snake_case ( _UpperCAmelCase ): return sum(DIGIT_FACTORIAL[d] for d in str(_UpperCAmelCase ) ) def __snake_case ( ): __a = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , _UpperCAmelCase ) if sum_of_digit_factorial(_UpperCAmelCase ) == i ) if __name__ == "__main__": print(f'{solution() = }')
60
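The snippet above sums the numbers that equal the sum of the factorials of their digits, but its two module-level functions share one obfuscated name, so it cannot run as written. A minimal self-contained sketch of the same check, with hypothetical readable names:

from math import factorial

def digit_factorial_sum(n: int) -> int:
    # sum of the factorials of the decimal digits of n
    return sum(factorial(int(d)) for d in str(n))

# 145 and 40585 are the only numbers above 2 with this property.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(40585) == 40585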
__snake_case :str = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # Return True if there is node that has not iterated. __a = [False] * len(_UpperCAmelCase ) __a = [s] __a = True while queue: __a = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCAmelCase ) __a = True __a = u return visited[t] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = [-1] * (len(_UpperCAmelCase )) __a = 0 __a = [] __a = [i[:] for i in graph] # Record original cut, copy. while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = float('''Inf''' ) __a = sink while s != source: # Find the minimum value in select path __a = min(_UpperCAmelCase , graph[parent[s]][s] ) __a = parent[s] max_flow += path_flow __a = sink while v != source: __a = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __a = parent[v] for i in range(len(_UpperCAmelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
60
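A quick hand check for the adjacency matrix above (not stated in the snippet): the cut separating {0, 1, 2, 4} from {3, 5} crosses only the edges 1→3, 4→3 and 4→5, so its capacity is 12 + 7 + 4 = 23, and a matching flow of 23 exists (12 units along 0→1→3→5, 7 along 0→2→4→3→5 and 4 along 0→2→4→5); the maximum flow from source 0 to sink 5 is therefore 23.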
1
from __future__ import annotations from collections import deque class _A : def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : list[str]): '''simple docstring''' __a = [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []}) for keyword in keywords: self.add_keyword(__SCREAMING_SNAKE_CASE) self.set_fail_transitions() def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' __a = 0 for character in keyword: __a = self.find_next_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], }) self.adlist[current_state]["next_states"].append(len(self.adlist) - 1) __a = len(self.adlist) - 1 else: __a = next_state self.adlist[current_state]["output"].append(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = deque() for node in self.adlist[0]["next_states"]: q.append(__SCREAMING_SNAKE_CASE) __a = 0 while q: __a = q.popleft() for child in self.adlist[r]["next_states"]: q.append(__SCREAMING_SNAKE_CASE) __a = self.adlist[r]['''fail_state'''] while ( self.find_next_state(__SCREAMING_SNAKE_CASE , self.adlist[child]['''value''']) is None and state != 0 ): __a = self.adlist[state]['''fail_state'''] __a = self.find_next_state( __SCREAMING_SNAKE_CASE , self.adlist[child]['''value''']) if self.adlist[child]["fail_state"] is None: __a = 0 __a = ( self.adlist[child]['''output'''] + self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' __a = {} # returns a dict with keywords and list of its occurrences __a = 0 for i in range(len(__SCREAMING_SNAKE_CASE)): while ( self.find_next_state(__SCREAMING_SNAKE_CASE , string[i]) is None and current_state != 0 ): __a = self.adlist[current_state]['''fail_state'''] __a = self.find_next_state(__SCREAMING_SNAKE_CASE , string[i]) if next_state is None: __a = 0 else: __a = next_state for key in self.adlist[current_state]["output"]: if key not in result: __a = [] result[key].append(i - len(__SCREAMING_SNAKE_CASE) + 1) return result if __name__ == "__main__": import doctest doctest.testmod()
60
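The class above builds an Aho-Corasick automaton (a trie with failure links) and is meant to return, for each keyword, the start indices of its occurrences in a string; because every method was renamed to the same obfuscated identifier, it cannot be exercised directly. A naive brute-force reference with hypothetical names, showing only the expected output shape (not the linear-time automaton itself):

def naive_multi_search(keywords: list[str], text: str) -> dict[str, list[int]]:
    # brute-force stand-in: scan the text once per keyword
    hits: dict[str, list[int]] = {}
    for kw in keywords:
        start = text.find(kw)
        while start != -1:
            hits.setdefault(kw, []).append(start)
            start = text.find(kw, start + 1)
    return hits

assert naive_multi_search(["he", "she", "his"], "ahishers") == {"he": [4], "she": [3], "his": [1]}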
from __future__ import annotations def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): print(f'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(_UpperCAmelCase ): print(f'{i}\t\t{d}' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for j in range(_UpperCAmelCase ): __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = [float('''inf''' )] * vertex_count __a = 0.0 for _ in range(vertex_count - 1 ): for j in range(_UpperCAmelCase ): __a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: __a = distance[u] + w __a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() __snake_case :Dict = int(input('''Enter number of vertices: ''').strip()) __snake_case :Any = int(input('''Enter number of edges: ''').strip()) __snake_case :list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) __snake_case ,__snake_case ,__snake_case :int = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) __snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight} __snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip()) __snake_case :Optional[Any] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
60
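The functions above implement Bellman-Ford over an edge list of {src, dst, weight} dicts, but the main block refers to them by names the obfuscation removed. A compact self-contained sketch of the same relaxation loop, under hypothetical names:

INF = float("inf")

def relax_edges(edges: list[dict], vertex_count: int, source: int) -> list[float]:
    distance = [INF] * vertex_count
    distance[source] = 0.0
    for _ in range(vertex_count - 1):          # at most V-1 rounds of relaxation
        for edge in edges:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if distance[u] != INF and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    return distance

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
assert relax_edges(edges, 3, 0) == [0.0, 3, 1]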
1
__snake_case :Tuple = range(2, 20 + 1) __snake_case :Dict = [10**k for k in range(ks[-1] + 1)] __snake_case :dict[int, dict[int, list[list[int]]]] = {} def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = sum(a_i[j] for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ) __a = sum(a_i[j] * base[j] for j in range(min(len(_UpperCAmelCase ) , _UpperCAmelCase ) ) ) __a , __a = 0, 0 __a = n - i __a = memo.get(_UpperCAmelCase ) if sub_memo is not None: __a = sub_memo.get(_UpperCAmelCase ) if jumps is not None and len(_UpperCAmelCase ) > 0: # find and make the largest jump without going over __a = -1 for _k in range(len(_UpperCAmelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: __a = _k break if max_jump >= 0: __a , __a , __a = jumps[max_jump] # since the difference between jumps is cached, add c __a = diff + c for j in range(min(_UpperCAmelCase , len(_UpperCAmelCase ) ) ): __a , __a = divmod(_UpperCAmelCase , 10 ) if new_c > 0: add(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: __a = [] else: __a = {c: []} __a = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps __a , __a = next_term(_UpperCAmelCase , k - 1 , i + dn , _UpperCAmelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead __a , __a = compute(_UpperCAmelCase , _UpperCAmelCase , i + dn , _UpperCAmelCase ) diff += _diff dn += terms_jumped __a = sub_memo[c] # keep jumps sorted by # of terms skipped __a = 0 while j < len(_UpperCAmelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(_UpperCAmelCase , (diff, dn, k) ) return (diff, dn) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if i >= n: return 0, i if k > len(_UpperCAmelCase ): a_i.extend([0 for _ in range(k - len(_UpperCAmelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) __a = i __a , __a , __a = 0, 0, 0 for j in range(len(_UpperCAmelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 __a = ds_c + ds_b diff += addend __a = 0 for j in range(_UpperCAmelCase ): __a = a_i[j] + addend __a , __a = divmod(_UpperCAmelCase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return diff, i - start_i def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ): __a = digits[j] + addend if s >= 10: __a , __a = divmod(_UpperCAmelCase , 10 ) __a = addend // 10 + quotient else: __a = s __a = addend // 10 if addend == 0: break while addend > 0: __a , __a = divmod(_UpperCAmelCase , 10 ) digits.append(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase = 10**15 ): __a = [1] __a = 1 __a = 0 while True: __a , __a = next_term(_UpperCAmelCase , 20 , i + dn , _UpperCAmelCase ) dn += terms_jumped if dn == n - i: break __a = 0 for j in range(len(_UpperCAmelCase ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f'{solution() = }')
60
import os import sys import unittest __snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''') __snake_case :Any = ''' {0} = None ''' __snake_case :Dict = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' __snake_case :str = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''') self.assertIsNone(__SCREAMING_SNAKE_CASE) __a = find_backend(''' if not is_tokenizers_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''') __a = find_backend(''' if not is_tensorflow_text_available():''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''') __a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''') __a = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''') def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE) self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE) self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertModel''' , objects['''tf''']) self.assertIn('''FlaxBertModel''' , objects['''flax''']) self.assertIn('''BertModel''' , objects['''torch''']) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text''']) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers''']) def _lowerCamelCase ( self : Any): '''simple docstring''' __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''') __a = create_dummy_object('''function''' , '''\'torch\'''') self.assertEqual( __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''') __a = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __a = create_dummy_object('''FakeClass''' , '''\'torch\'''') self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']}) self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
60
1
import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __snake_case :Any = '''__DUMMY_TRANSFORMERS_USER__''' __snake_case :Optional[int] = '''Dummy User''' __snake_case :str = '''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''' __snake_case :Optional[int] = '''https://hub-ci.huggingface.co''' __snake_case :List[str] = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' __snake_case :str = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' __snake_case :List[str] = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def __snake_case ( _UpperCAmelCase ): monkeypatch.setattr( '''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , _UpperCAmelCase ) @pytest.fixture def __snake_case ( _UpperCAmelCase ): monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , _UpperCAmelCase ) monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , _UpperCAmelCase ) @pytest.fixture def __snake_case ( _UpperCAmelCase ): monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , _UpperCAmelCase ) @pytest.fixture def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): HfFolder.save_token(_UpperCAmelCase ) yield HfFolder.delete_token() @pytest.fixture(scope='''session''' ) def __snake_case ( ): return HfApi(endpoint=_UpperCAmelCase ) @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase ): __a = HfFolder.get_token() HfFolder.save_token(_UpperCAmelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_UpperCAmelCase ) @pytest.fixture def __snake_case ( _UpperCAmelCase ): def _cleanup_repo(_UpperCAmelCase ): hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' ) return _cleanup_repo @pytest.fixture def __snake_case ( _UpperCAmelCase ): @contextmanager def _temporary_repo(_UpperCAmelCase ): try: yield repo_id finally: cleanup_repo(_UpperCAmelCase ) return _temporary_repo @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = f'repo_txt_data-{int(time.time() * 1_0E3 )}' __a = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' , private=_UpperCAmelCase ) hf_api.upload_file( token=_UpperCAmelCase , path_or_fileobj=str(_UpperCAmelCase ) , path_in_repo='''data/text_data.txt''' , repo_id=_UpperCAmelCase , repo_type='''dataset''' , ) yield repo_id try: hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = f'repo_zipped_txt_data-{int(time.time() * 1_0E3 )}' __a = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' , private=_UpperCAmelCase ) hf_api.upload_file( token=_UpperCAmelCase , path_or_fileobj=str(_UpperCAmelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCAmelCase , repo_type='''dataset''' , ) yield repo_id try: hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass 
@pytest.fixture() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = f'repo_zipped_img_data-{int(time.time() * 1_0E3 )}' __a = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' , private=_UpperCAmelCase ) hf_api.upload_file( token=_UpperCAmelCase , path_or_fileobj=str(_UpperCAmelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCAmelCase , repo_type='''dataset''' , ) yield repo_id try: hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='''dataset''' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return hf_private_dataset_repo_zipped_img_data_
60
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __snake_case :str = get_logger() __snake_case :Optional[dict] = None class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' super().__init__(features=__SCREAMING_SNAKE_CASE) import jax from jaxlib.xla_client import Device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): raise ValueError( F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` ' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') __a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default ' F'device: {str(jax.devices()[0])}.') __a = str(jax.devices()[0]) __a = jnp_array_kwargs @staticmethod def _lowerCamelCase ( ): '''simple docstring''' import jax return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()} def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column: if all( isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0) return column def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))): return value elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() __a = {} if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: __a = {'''dtype''': jnp.intaa} else: __a = {'''dtype''': jnp.intaa} elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): __a = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image): __a = np.asarray(__SCREAMING_SNAKE_CASE) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a 
global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __a = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs}) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array): __a = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)): return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct]) return self._tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict): '''simple docstring''' return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE) return self.recursive_tensorize(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0]) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) __a = self._consolidate(__SCREAMING_SNAKE_CASE) return column def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table): '''simple docstring''' __a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE) __a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE) __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for column_name in batch: __a = self._consolidate(batch[column_name]) return batch
60
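The formatter above is normally not instantiated by hand. Assuming a datasets release that ships this JAX formatter, and with jax installed, it is reached through the format API, roughly:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
row = ds[0]              # columns come back as jax arrays rather than Python lists
print(type(row["x"]))    # expected: a jax Array type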
1
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = f'Expected string as input, found {type(_UpperCAmelCase )}' raise ValueError(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = f'Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}' raise ValueError(_UpperCAmelCase ) __a = input_str.split('''_''' ) __a = 0 if use_pascal else 1 __a = words[start_index:] __a = [word[0].upper() + word[1:] for word in words_to_capitalize] __a = '''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
60
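A minimal readable sketch of the same split-and-capitalize conversion performed above, under the hypothetical name to_camel_case (the original exposes it through an obfuscated name):

def to_camel_case(text: str, use_pascal: bool = False) -> str:
    words = text.split("_")
    start = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start:]]
    head = "" if use_pascal else words[0]
    return "".join([head, *capitalized])

assert to_camel_case("some_random_string") == "someRandomString"
assert to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"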
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __snake_case :Tuple = logging.getLogger(__name__) if __name__ == "__main__": __snake_case :Union[str, Any] = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_0522, type=int) __snake_case :List[str] = parser.parse_args() logger.info(f'Loading data from {args.data_file}') with open(args.data_file, '''rb''') as fp: __snake_case :Optional[Any] = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') __snake_case :Dict = Counter() for tk_ids in data: counter.update(tk_ids) __snake_case :Optional[Any] = [0] * args.vocab_size for k, v in counter.items(): __snake_case :Any = v logger.info(f'Dump to {args.token_counts_dump}') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
60
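A toy illustration (hypothetical data, not from the script above) of the counting the dump performs: occurrences of each token id are accumulated with a Counter and then spread into a dense list indexed by id:

from collections import Counter

data = [[5, 7, 7], [7, 2]]        # two binarized sequences of token ids
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)

vocab_size = 10                    # pretend vocabulary size
counts = [0] * vocab_size
for token_id, freq in counter.items():
    counts[token_id] = freq
assert counts[7] == 3 and counts[5] == 1 and counts[2] == 1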
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class _A ( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : int=18 , __SCREAMING_SNAKE_CASE : Union[str, Any]=30 , __SCREAMING_SNAKE_CASE : Dict=400 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , __SCREAMING_SNAKE_CASE : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , __SCREAMING_SNAKE_CASE : str=True , ): '''simple docstring''' __a = size if size is not None else {'''height''': 224, '''width''': 224} __a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_center_crop __a = crop_size __a = do_normalize __a = image_mean __a = image_std __a = do_convert_rgb def _lowerCamelCase ( self : str): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : List[Any]=False): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __a = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: __a = [] for i in range(self.batch_size): __a , __a = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __a = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs] if torchify: __a = [torch.from_numpy(__SCREAMING_SNAKE_CASE) for x in image_inputs] return image_inputs @require_torch @require_vision class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Any = ChineseCLIPImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : int): '''simple docstring''' __a = ChineseCLIPImageProcessingTester(self , do_center_crop=__SCREAMING_SNAKE_CASE) @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''')) 
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''')) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224}) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) __a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {'''shortest_edge''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' pass def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PIL images __a = self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __a = self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __a = self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : str): '''simple docstring''' __a = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__SCREAMING_SNAKE_CASE) __a = 3 @property def _lowerCamelCase ( self : Any): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''')) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''')) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' pass def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict) # create random PIL images __a = self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case :List[str] = HfApi() __snake_case :str = {} # fmt: off __snake_case :Optional[Any] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __snake_case :Union[str, Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __snake_case :str = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __snake_case :List[Any] = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __snake_case :Any = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __snake_case :List[str] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __snake_case :Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __snake_case :Tuple = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __snake_case :List[Any] = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, 
-1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __snake_case :Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __snake_case :Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __snake_case :List[str] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __snake_case :Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __snake_case :List[str] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __snake_case :Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on __snake_case :List[Any] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith('''CompVis'''): __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case :List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case :Any = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller __snake_case :Any = 3 def __snake_case ( _UpperCAmelCase ): print('''Generating primitive root of p''' ) while True: __a = random.randrange(3 , _UpperCAmelCase ) if pow(_UpperCAmelCase , 2 , _UpperCAmelCase ) == 1: continue if pow(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) == 1: continue return g def __snake_case ( _UpperCAmelCase ): print('''Generating prime p...''' ) __a = rabin_miller.generate_large_prime(_UpperCAmelCase ) # select large prime number. __a = primitive_root(_UpperCAmelCase ) # one primitive root on modulo p. __a = random.randrange(3 , _UpperCAmelCase ) # private_key -> have to be greater than 2 for safety. __a = cryptomath.find_mod_inverse(pow(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) __a = (key_size, e_a, e_a, p) __a = (key_size, d) return public_key, private_key def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ): print('''\nWARNING:''' ) print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() __a , __a = generate_key(_UpperCAmelCase ) print(f'\nWriting public key to file {name}_pubkey.txt...' ) with open(f'{name}_pubkey.txt' , '''w''' ) as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(f'Writing private key to file {name}_privkey.txt...' ) with open(f'{name}_privkey.txt' , '''w''' ) as fo: fo.write(f'{private_key[0]},{private_key[1]}' ) def __snake_case ( ): print('''Making key files...''' ) make_key_files('''elgamal''' , 2048 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
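# The script above only generates and stores an ElGamal key pair. Below is a minimal
# textbook sketch of how encryption and decryption would use such a key. The parameter
# names and toy key sizes are illustrative assumptions, not the exact tuple layout
# written to the key files by make_key_files.
import random


def elgamal_encrypt(m: int, p: int, g: int, h: int) -> tuple[int, int]:
    k = random.randrange(2, p - 1)  # fresh ephemeral key per message
    return pow(g, k, p), (m * pow(h, k, p)) % p


def elgamal_decrypt(c1: int, c2: int, p: int, d: int) -> int:
    s = pow(c1, d, p)  # shared secret g^(k*d) mod p
    return (c2 * pow(s, -1, p)) % p  # divide out the shared secret


p, g, d = 467, 2, 127  # toy parameters, never use sizes like this in practice
h = pow(g, d, p)  # public component published alongside (p, g)
c1, c2 = elgamal_encrypt(123, p, g, h)
assert elgamal_decrypt(c1, c2, p, d) == 123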
from collections.abc import Generator from math import sin def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) != 32: raise ValueError('''Input must be of length 32''' ) __a = b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''08x''' )[-8:] __a = b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def __snake_case ( _UpperCAmelCase ): __a = b'''''' for char in message: bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' ) __a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_UpperCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(_UpperCAmelCase ) , 512 ): __a = bit_string[pos : pos + 512] __a = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''032b''' ) __a = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(_UpperCAmelCase , 2 ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return (a + b) % 2**32 def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __snake_case ( _UpperCAmelCase ): __a = preprocess(_UpperCAmelCase ) __a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __a = 0X67_452_301 __a = 0Xef_cda_b89 __a = 0X98_bad_cfe __a = 0X10_325_476 __a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_UpperCAmelCase ): __a = aa __a = ba __a = ca __a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __a = d ^ (b & (c ^ d)) __a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __a = c ^ (d & (b ^ c)) __a = (5 * i + 1) % 16 elif i <= 47: __a = b ^ c ^ d __a = (3 * i + 5) % 16 else: __a = c ^ (b | not_aa(_UpperCAmelCase )) __a = (7 * i) % 16 __a = (f + a + added_consts[i] + block_words[g]) % 2**32 __a = d __a = c __a = b __a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
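# A from-scratch digest like the one above is easiest to validate against the standard
# library. In this hedged sketch, `md5_me` is a hypothetical name standing in for the
# top-level digest function defined above, which builds its hex digest as bytes.
import hashlib


def matches_hashlib(md5_me, message: bytes) -> bool:
    digest = md5_me(message)
    if isinstance(digest, bytes):  # normalise a bytes hex string to str
        digest = digest.decode("utf-8")
    return digest == hashlib.md5(message).hexdigest()


# e.g. matches_hashlib(md5_me, b"hello world") should return True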
class _A ( __UpperCAmelCase ): pass class _A ( __UpperCAmelCase ): pass class _A : def __init__( self : Union[str, Any]): '''simple docstring''' __a = [ [], [], [], ] def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' try: if len(self.queues[priority]) >= 100: raise OverflowError('''Maximum queue size is 100''') self.queues[priority].append(__SCREAMING_SNAKE_CASE) except IndexError: raise ValueError('''Valid priorities are 0, 1, and 2''') def _lowerCamelCase ( self : Dict): '''simple docstring''' for queue in self.queues: if queue: return queue.pop(0) raise UnderFlowError('''All queues are empty''') def __str__( self : Tuple): '''simple docstring''' return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues)) class _A : def __init__( self : Tuple): '''simple docstring''' __a = [] def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' if len(self.queue) == 100: raise OverFlowError('''Maximum queue size is 100''') self.queue.append(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' if not self.queue: raise UnderFlowError('''The queue is empty''') else: __a = min(self.queue) self.queue.remove(__SCREAMING_SNAKE_CASE) return data def __str__( self : Any): '''simple docstring''' return str(self.queue) def __snake_case ( ): __a = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(_UpperCAmelCase ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(_UpperCAmelCase ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def __snake_case ( ): __a = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(_UpperCAmelCase ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(_UpperCAmelCase ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
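# For comparison, the smallest-element-first behaviour of the element priority queue
# above can be expressed with the standard-library heapq module. This is an alternative
# design sketch, not a drop-in replacement: enqueue/dequeue become O(log n) instead of
# the O(n) min()/remove() pair used above.
import heapq


class HeapElementQueue:
    def __init__(self) -> None:
        self._heap: list[int] = []

    def enqueue(self, item: int) -> None:
        heapq.heappush(self._heap, item)

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)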
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} __snake_case :List[Any] = '''zero2''' __snake_case :Optional[Any] = '''zero3''' __snake_case :str = [ZEROa, ZEROa] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a = parameterized.to_safe_name('''_'''.join(str(_UpperCAmelCase ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test __snake_case :List[Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _A ( __UpperCAmelCase ): @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) @require_torch_multi_gpu @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' self.run_and_check( stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' pass def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE 
: bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = models[model] __a = self.run_trainer( stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , ) self.do_checks(__SCREAMING_SNAKE_CASE) return output_dir def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE) __a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(['''--fp16''']) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __a = self.get_launcher(__SCREAMING_SNAKE_CASE) __a = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env()) return output_dir def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False): '''simple docstring''' __a = min(2 , get_gpu_count()) if distributed else 1 return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # colour every connected component, starting from each unvisited vertex
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge joins two vertices of the same colour
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
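# The adjacency list above is a 4-cycle plus an isolated vertex, so the check prints
# True. An odd cycle gives the complementary case:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False, a triangle cannot be 2-coloured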
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or PascalCase if use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
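# A couple of calls make the two modes of the converter above concrete:
print(snake_to_camel_case("some_random_string"))  # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString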
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, using O(r) extra space."""
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row from the previous row, right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
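# A quick cross-check of the Pascal's-rule implementation against the standard library:
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252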
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class _A : UpperCamelCase__ : str UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None UpperCamelCase__ : Optional[Union[str, int]] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a , __a , __a = _str_to_version_tuple(self.version_str) def __repr__( self : Tuple): '''simple docstring''' return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.major, self.minor, self.patch def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return Version(__SCREAMING_SNAKE_CASE) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): return other raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.') def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' try: __a = self._validate_operand(__SCREAMING_SNAKE_CASE) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' __a = self._validate_operand(__SCREAMING_SNAKE_CASE) return self.tuple < other.tuple def __hash__( self : Optional[Any]): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple)) @classmethod def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' __a = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _lowerCamelCase ( self : int): '''simple docstring''' return self.version_str def __snake_case ( _UpperCAmelCase ): __a = _VERSION_REG.match(_UpperCAmelCase ) if not res: raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def __snake_case ( _UpperCAmelCase ): return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
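# Short usage sketch of the Version helper above, assuming it behaves like the upstream
# datasets Version class it mirrors (string operands are coerced via _validate_operand):
# v = Version("1.0.0")
# assert v.tuple == (1, 0, 0)
# assert v < Version("1.2.0")
# assert v == "1.0.0"  # the string is parsed into a Version before comparison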
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __snake_case :List[str] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __snake_case :Union[str, Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', 
f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', 
'''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = state_dict.pop(_UpperCAmelCase ) __a = val def __snake_case ( _UpperCAmelCase ): __a = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: __a = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) __a = value else: __a = value return new_state_dict def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False ): __a = '''''' if is_panoptic: __a = '''conditional_detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __a = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) __a = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __a = in_proj_weight[:256, :] __a = in_proj_bias[:256] __a = in_proj_weight[256:512, :] __a = in_proj_bias[256:512] __a = in_proj_weight[-256:, :] __a = in_proj_bias[-256:] def __snake_case ( ): __a = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: __a = '''resnet101''' if "dc5" in model_name: __a = True __a = '''panoptic''' in model_name if is_panoptic: __a = 250 else: __a = 91 __a = '''huggingface/label-files''' __a = '''coco-detection-id2label.json''' __a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} # load image processor __a = '''coco_panoptic''' if is_panoptic else '''coco_detection''' __a = ConditionalDetrImageProcessor(format=_UpperCAmelCase ) # prepare image __a = prepare_img() __a = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ) __a = encoding['''pixel_values'''] logger.info(f'Converting model 
{model_name}...' ) # load original model from torch hub __a = torch.hub.load('''DeppMeng/ConditionalDETR''' , _UpperCAmelCase , pretrained=_UpperCAmelCase ).eval() __a = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: __a = '''conditional_detr.''' + src rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __a = rename_backbone_keys(_UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_UpperCAmelCase , is_panoptic=_UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __a = '''conditional_detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''conditional_detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): __a = state_dict.pop(_UpperCAmelCase ) __a = val elif "class_labels_classifier" in key or "bbox_predictor" in key: __a = state_dict.pop(_UpperCAmelCase ) __a = val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: __a = state_dict.pop(_UpperCAmelCase ) __a = val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): __a = state_dict.pop(_UpperCAmelCase ) __a = val # finally, create HuggingFace model and load state dict __a = ConditionalDetrForSegmentation(_UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_UpperCAmelCase ) model.load_state_dict(_UpperCAmelCase ) model.eval() model.push_to_hub(repo_id=_UpperCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' ) # verify our conversion __a = conditional_detr(_UpperCAmelCase ) __a = model(_UpperCAmelCase ) assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 ) # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __snake_case :Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) __snake_case :List[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata __snake_case :int = '''''' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class _A ( tr.AbstractTransform ): def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "): '''simple docstring''' __a = sentence_delimiter def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' return list(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]): '''simple docstring''' __a = [] for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE): chars.extend(self.process_string(__SCREAMING_SNAKE_CASE)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1: chars.append(self.sentence_delimiter) return chars __snake_case :Any = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __snake_case :Optional[int] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __snake_case :Optional[int] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' __snake_case :Tuple = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' __snake_case :Tuple = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"] __a = 0 __a = 0 for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = jiwer.compute_measures( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
import os def __snake_case ( ): with open(os.path.dirname(_UpperCAmelCase ) + '''/grid.txt''' ) as f: __a = [] # noqa: E741 for _ in range(20 ): l.append([int(_UpperCAmelCase ) for x in f.readline().split()] ) __a = 0 # right for i in range(20 ): for j in range(17 ): __a = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: __a = temp # down for i in range(17 ): for j in range(20 ): __a = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: __a = temp # diagonal 1 for i in range(17 ): for j in range(17 ): __a = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: __a = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): __a = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: __a = temp return maximum if __name__ == "__main__": print(solution())
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :List[str] = ['''ViTFeatureExtractor'''] __snake_case :Optional[Any] = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :str = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Tuple = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = ComputeEnvironment.AMAZON_SAGEMAKER UpperCamelCase__ : int = True UpperCamelCase__ : Dict = '''ml.p3.2xlarge''' UpperCamelCase__ : List[Any] = '''accelerate_sagemaker_execution_role''' UpperCamelCase__ : int = '''hf-sm''' UpperCamelCase__ : List[Any] = '''us-east-1''' UpperCamelCase__ : List[Any] = 1 UpperCamelCase__ : List[str] = '''accelerate-sagemaker-1''' UpperCamelCase__ : Optional[int] = '''1.6''' UpperCamelCase__ : List[str] = '''4.4''' UpperCamelCase__ : List[str] = '''train.py''' UpperCamelCase__ : Tuple = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''False''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] UpperCamelCase__ : Optional[int] = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''--do_test''', '''False''', '''--do_predict''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args) assert isinstance(converted_args['''model_name_or_path'''] , __SCREAMING_SNAKE_CASE) assert isinstance(converted_args['''do_train'''] , __SCREAMING_SNAKE_CASE) assert isinstance(converted_args['''epochs'''] , __SCREAMING_SNAKE_CASE) assert isinstance(converted_args['''learning_rate'''] , __SCREAMING_SNAKE_CASE) assert isinstance(converted_args['''max_steps'''] , __SCREAMING_SNAKE_CASE) with pytest.raises(__SCREAMING_SNAKE_CASE): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _A ( __UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : List[str] = GPTSwaTokenizer UpperCamelCase__ : Dict = False UpperCamelCase__ : int = True UpperCamelCase__ : List[Any] = False def _lowerCamelCase ( self : List[Any]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = '''This is a test''' __a = '''This is a test''' return input_text, output_text def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = '''<s>''' __a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000) def _lowerCamelCase ( self : Dict): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2_000) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = tokenizer.tokenize('''This is a test''') self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842]) __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) self.assertListEqual( __SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) # fmt: off self.assertListEqual( __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']) # fmt: on def _lowerCamelCase ( self : Any): '''simple docstring''' __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE) __a = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] __a = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): 
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # Test that decode_fast returns the input text for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) @slow def _lowerCamelCase ( self : Any): '''simple docstring''' __a = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off __a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __snake_case :Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , ): output_path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , use_external_data_format=_UpperCAmelCase , enable_onnx_checker=_UpperCAmelCase , opset_version=_UpperCAmelCase , ) else: export( _UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , opset_version=_UpperCAmelCase , ) @torch.no_grad() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ): __a = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __a = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: __a = '''cpu''' __a = Path(_UpperCAmelCase ) # VAE DECODER __a = AutoencoderKL.from_pretrained(model_path + '''/vae''' ) __a = vae_decoder.config.latent_channels # forward only through the decoder part __a = vae_decoder.decode onnx_export( _UpperCAmelCase , model_args=( torch.randn(1 , _UpperCAmelCase , 25 , 25 ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=_UpperCAmelCase , ) del vae_decoder if __name__ == "__main__": __snake_case :List[Any] = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=14, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') __snake_case :str = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('''SD: Done: ONNX''')
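# Once exported, the decoder can be exercised with onnxruntime. This is a hedged smoke
# test: the output path, the "latent_sample" input name, and the 4 latent channels are
# assumptions matching a Stable-Diffusion-style VAE and the layout used by the script.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)  # 4 latent channels assumed
(sample,) = session.run(None, {"latent_sample": latent})
print(sample.shape)  # roughly (1, 3, 200, 200) for an 8x upsampling decoder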
60
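# Illustrative sketch (not part of the dataset sample above): the core
# torch.onnx.export call pattern used by the VAE-decoder conversion script,
# demonstrated on a tiny stand-in module so it runs without a diffusers checkpoint.
# TinyDecoder, the output filename and the tensor shapes are assumptions made only
# for illustration.
import torch

class TinyDecoder(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Conv2d(4, 3, kernel_size=3, padding=1)

    def forward(self, latent_sample):
        return self.proj(latent_sample)

decoder = TinyDecoder().eval()
dummy = torch.randn(1, 4, 25, 25)  # (batch, latent channels, height, width)
torch.onnx.export(
    decoder,
    (dummy,),
    "vae_decoder_sketch.onnx",
    input_names=["latent_sample"],
    output_names=["sample"],
    dynamic_axes={"latent_sample": {0: "batch", 2: "height", 3: "width"}},
    do_constant_folding=True,
    opset_version=14,
)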
from __future__ import annotations __snake_case :Optional[Any] = [] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for i in range(len(_UpperCAmelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCAmelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ): if board[i][j] == 1: return False return True def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if row >= len(_UpperCAmelCase ): solution.append(_UpperCAmelCase ) printboard(_UpperCAmelCase ) print() return True for i in range(len(_UpperCAmelCase ) ): if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = 1 solve(_UpperCAmelCase , row + 1 ) __a = 0 return False def __snake_case ( _UpperCAmelCase ): for i in range(len(_UpperCAmelCase ) ): for j in range(len(_UpperCAmelCase ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) __snake_case :Optional[Any] = 8 __snake_case :Tuple = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
60
1
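# Illustrative sketch (not part of the dataset sample above): the same backtracking
# idea as the N-queens solver, with occupied columns and both diagonals tracked in
# sets so no board rescan is needed. Names here are illustrative, not taken from the
# script above.
def count_n_queens(n):
    cols, diag_down, diag_up = set(), set(), set()

    def place(row):
        if row == n:
            return 1
        total = 0
        for col in range(n):
            if col in cols or (row - col) in diag_down or (row + col) in diag_up:
                continue
            cols.add(col); diag_down.add(row - col); diag_up.add(row + col)
            total += place(row + 1)
            cols.discard(col); diag_down.discard(row - col); diag_up.discard(row + col)
        return total

    return place(0)

print(count_n_queens(8))  # 92, the classical count of 8-queens solutions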
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __snake_case :int = logging.get_logger(__name__) __snake_case :str = OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', '''LevitFeatureExtractor'''), ('''maskformer''', '''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), 
('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) __snake_case :Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def __snake_case ( _UpperCAmelCase ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: __a = model_type_to_module_name(_UpperCAmelCase ) __a = importlib.import_module(f'.{module_name}' , '''transformers.models''' ) try: return getattr(_UpperCAmelCase , _UpperCAmelCase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(_UpperCAmelCase , '''__name__''' , _UpperCAmelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __a = importlib.import_module('''transformers''' ) if hasattr(_UpperCAmelCase , _UpperCAmelCase ): return getattr(_UpperCAmelCase , _UpperCAmelCase ) return None def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , **_UpperCAmelCase , ): __a = get_file_from_repo( _UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) if resolved_config_file is None: logger.info( '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' ) return {} with open(_UpperCAmelCase , encoding='''utf-8''' ) as reader: return json.load(_UpperCAmelCase ) class _A : def __init__( self : Any): '''simple docstring''' raise EnvironmentError( '''AutoFeatureExtractor is designed to be instantiated ''' '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''') @classmethod @replace_list_option_in_docstrings(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' __a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) __a = kwargs.pop('''trust_remote_code''' , __SCREAMING_SNAKE_CASE) __a = True __a , __a = FeatureExtractionMixin.get_feature_extractor_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = config_dict.get('''feature_extractor_type''' , __SCREAMING_SNAKE_CASE) __a = None if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}): __a = config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) # It could be in `config.feature_extractor_type`` __a = getattr(__SCREAMING_SNAKE_CASE , '''feature_extractor_type''' , __SCREAMING_SNAKE_CASE) if hasattr(__SCREAMING_SNAKE_CASE , '''auto_map''') and "AutoFeatureExtractor" in config.auto_map: __a = config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: __a = feature_extractor_class_from_name(__SCREAMING_SNAKE_CASE) __a = feature_extractor_auto_map is not None __a = feature_extractor_class is not None or type(__SCREAMING_SNAKE_CASE) in FEATURE_EXTRACTOR_MAPPING __a = resolve_trust_remote_code( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) if has_remote_code and trust_remote_code: __a = get_class_from_dynamic_module( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = kwargs.pop('''code_revision''' , __SCREAMING_SNAKE_CASE) if os.path.isdir(__SCREAMING_SNAKE_CASE): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(__SCREAMING_SNAKE_CASE) in FEATURE_EXTRACTOR_MAPPING: __a = FEATURE_EXTRACTOR_MAPPING[type(__SCREAMING_SNAKE_CASE)] return feature_extractor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) raise ValueError( F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ' F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ' F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}') @staticmethod def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' FEATURE_EXTRACTOR_MAPPING.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
60
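# Illustrative sketch (not part of the dataset sample above): typical use of the
# AutoFeatureExtractor resolution logic implemented above. The checkpoint id is an
# example; the call fetches the checkpoint's preprocessor config and instantiates
# whichever feature extractor class it names.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # expected to resolve to Wav2Vec2FeatureExtractor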
def __snake_case ( _UpperCAmelCase ): __a = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def __snake_case ( _UpperCAmelCase ): __a = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __a = remove_duplicates(key.upper() ) __a = len(_UpperCAmelCase ) # First fill cipher with key characters __a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(_UpperCAmelCase ) , 26 ): __a = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __a = alphabet[i - offset] __a = char return cipher_alphabet def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() ) def __snake_case ( ): __a = input('''Enter message to encode or decode: ''' ).strip() __a = input('''Enter keyword: ''' ).strip() __a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __a = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __a = create_cipher_map(_UpperCAmelCase ) print(func(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
60
1
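# Illustrative sketch (not part of the dataset sample above): a minimal keyword
# substitution cipher. Deduplicated key letters head the cipher alphabet and the
# unused letters follow in order; the script above fills the remainder with a
# different offset scheme, so the two produce different (but equally valid) maps.
import string

def make_cipher_map(key):
    seen = []
    for ch in key.upper():
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    remainder = [c for c in string.ascii_uppercase if c not in seen]
    return dict(zip(string.ascii_uppercase, seen + remainder))

def encipher(message, cipher_map):
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())

def decipher(message, cipher_map):
    reverse = {v: k for k, v in cipher_map.items()}
    return "".join(reverse.get(ch, ch) for ch in message.upper())

cmap = make_cipher_map("secret")
assert decipher(encipher("HELLO WORLD", cmap), cmap) == "HELLO WORLD"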
import math import tensorflow as tf from packaging import version def __snake_case ( _UpperCAmelCase ): __a = tf.convert_to_tensor(_UpperCAmelCase ) __a = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def __snake_case ( _UpperCAmelCase ): __a = tf.convert_to_tensor(_UpperCAmelCase ) __a = tf.cast(math.pi , x.dtype ) __a = tf.cast(0.04_47_15 , x.dtype ) __a = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_UpperCAmelCase , 3 )) )) return x * cdf def __snake_case ( _UpperCAmelCase ): __a = tf.convert_to_tensor(_UpperCAmelCase ) return x * tf.tanh(tf.math.softplus(_UpperCAmelCase ) ) def __snake_case ( _UpperCAmelCase ): __a = tf.convert_to_tensor(_UpperCAmelCase ) __a = tf.cast(0.04_47_15 , x.dtype ) __a = tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def __snake_case ( _UpperCAmelCase ): __a = tf.convert_to_tensor(_UpperCAmelCase ) __a = tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def __snake_case ( _UpperCAmelCase ): return tf.clip_by_value(_gelu(_UpperCAmelCase ) , -10 , 10 ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=-1 ): __a , __a = tf.split(_UpperCAmelCase , 2 , axis=_UpperCAmelCase ) return a * tf.math.sigmoid(_UpperCAmelCase ) if version.parse(tf.version.VERSION) >= version.parse('''2.4'''): def __snake_case ( _UpperCAmelCase ): return tf.keras.activations.gelu(_UpperCAmelCase , approximate=_UpperCAmelCase ) __snake_case :str = tf.keras.activations.gelu __snake_case :Dict = approximate_gelu_wrap else: __snake_case :List[str] = _gelu __snake_case :Optional[int] = _gelu_new __snake_case :Union[str, Any] = { '''gelu''': gelu, '''gelu_10''': gelu_aa, '''gelu_fast''': gelu_fast, '''gelu_new''': gelu_new, '''glu''': glu, '''mish''': mish, '''quick_gelu''': quick_gelu, '''relu''': tf.keras.activations.relu, '''sigmoid''': tf.keras.activations.sigmoid, '''silu''': tf.keras.activations.swish, '''swish''': tf.keras.activations.swish, '''tanh''': tf.keras.activations.tanh, } def __snake_case ( _UpperCAmelCase ): if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
60
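# Illustrative sketch (not part of the dataset sample above): a numeric sanity check
# that the erf-based GELU and the tanh approximation defined above agree closely,
# written with the standard library so it runs without TensorFlow.
import math

def gelu_exact(x):
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

for x in (-3.0, -1.0, 0.0, 1.0, 3.0):
    assert abs(gelu_exact(x) - gelu_tanh(x)) < 1e-2, x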
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __snake_case :List[Any] = None __snake_case :Dict = logging.get_logger(__name__) __snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case :Union[str, Any] = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __snake_case :Optional[Any] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __snake_case :Optional[int] = '''▁''' class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : str = ['''input_ids''', '''attention_mask'''] UpperCamelCase__ : Dict = BarthezTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = vocab_file __a = False if not self.vocab_file else True def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + 
token_ids_a + sep) * [0] def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(__SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE) return (out_vocab_file,)
60
1
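# Illustrative sketch (not part of the dataset sample above): the special-token
# layout built by the tokenizer above, shown with plain lists. The BOS/EOS ids are
# placeholders; a single sequence becomes <s> A </s>, a pair becomes
# <s> A </s></s> B </s>, and the token type ids are all zero in both cases.
BOS, EOS = 0, 2  # placeholder ids standing in for <s> and </s>

def with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [BOS] + ids_a + [EOS]
    return [BOS] + ids_a + [EOS, EOS] + ids_b + [EOS]

print(with_special_tokens([10, 11]))        # [0, 10, 11, 2]
print(with_special_tokens([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]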
def __snake_case ( x_points , y_points , xa ):
    # Neville-style polynomial interpolation: q[j][i] holds the value at xa of the
    # polynomial through points j-i+1 .. j; the result accumulates in q[n-1][n-1].
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2 , n):
        for j in range(i , n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
60
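# Illustrative usage (not part of the dataset sample above) of the interpolation
# routine defined above: the supplied points lie on y = x**2, so evaluating at
# x = 5 returns 25.0 exactly. The input values are examples chosen for this check.
value, table = __snake_case([1, 2, 3, 4], [1, 4, 9, 16], 5)
print(value)  # 25.0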
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def __snake_case ( _UpperCAmelCase ): __a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0] @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(rows * cols * num_images ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) __a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 ) return data @deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = labels_dense.shape[0] __a = numpy.arange(_UpperCAmelCase ) * num_classes __a = numpy.zeros((num_labels, num_classes) ) __a = 1 return labels_one_hot @deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __a = _readaa(_UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __a = _readaa(_UpperCAmelCase ) __a = bytestream.read(_UpperCAmelCase ) __a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase ) return labels class _A : @deprecated( __SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' __a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda) __a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype) if fake_data: __a = 10_000 __a = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' __a = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __a = images.reshape( images.shape[0] , images.shape[1] * images.shape[2]) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__a = images.astype(numpy.floataa) __a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0) __a = images __a = labels __a = 0 __a = 0 @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self._images @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self._labels @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return self._num_examples @property def _lowerCamelCase ( self : str): '''simple docstring''' return self._epochs_completed def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True): '''simple docstring''' if fake_data: __a = [1] * 784 __a = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__SCREAMING_SNAKE_CASE)], [fake_label for _ in range(__SCREAMING_SNAKE_CASE)], ) __a = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perma] __a = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __a = self._num_examples - start __a = self._images[start : self._num_examples] __a = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __a = numpy.arange(self._num_examples) numpy.random.shuffle(__SCREAMING_SNAKE_CASE) __a = self.images[perm] __a = self.labels[perm] # Start next epoch __a = 0 __a = batch_size - rest_num_examples __a = self._index_in_epoch __a = self._images[start:end] __a = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0), ) else: self._index_in_epoch += batch_size __a = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not gfile.Exists(_UpperCAmelCase ): gfile.MakeDirs(_UpperCAmelCase ) __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not gfile.Exists(_UpperCAmelCase ): urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310 with gfile.GFile(_UpperCAmelCase ) as f: __a = f.size() print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' ) return filepath @deprecated( _UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase ) __a = fake() __a = fake() __a = fake() return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase ) if not source_url: # empty string check __a = DEFAULT_SOURCE_URL __a = '''train-images-idx3-ubyte.gz''' __a = '''train-labels-idx1-ubyte.gz''' __a = '''t10k-images-idx3-ubyte.gz''' __a = '''t10k-labels-idx1-ubyte.gz''' __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as 
f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_images(_UpperCAmelCase ) __a = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(_UpperCAmelCase , '''rb''' ) as f: __a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) if not 0 <= validation_size <= len(_UpperCAmelCase ): __a = ( '''Validation size should be between 0 and ''' f'{len(_UpperCAmelCase )}. Received: {validation_size}.' ) raise ValueError(_UpperCAmelCase ) __a = train_images[:validation_size] __a = train_labels[:validation_size] __a = train_images[validation_size:] __a = train_labels[validation_size:] __a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
60
1
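# Illustrative sketch (not part of the dataset sample above): a self-contained
# restatement of the one-hot trick used by the MNIST loader above, where flat index
# arithmetic turns a vector of class ids into an indicator matrix.
import numpy as np

def dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

print(dense_to_one_hot(np.array([0, 2, 1]), 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]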