Dataset columns:

    column                   dtype    range
    code                     string   lengths 82 – 54.1k
    code_codestyle           int64    0 – 699
    style_context            string   lengths 111 – 35.6k
    style_context_codestyle  int64    0 – 699
    label                    int64    0 – 1
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
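A quick usage sketch for the config class above; this is illustrative only and assumes the file sits in its usual place inside the transformers package:

# Hypothetical usage sketch (not part of the original file):
# from transformers import DetrConfig
#
# config = DetrConfig(num_queries=50)
# # `hidden_size` and `num_attention_heads` are read-only aliases
# # defined via `attribute_map` above
# assert config.hidden_size == config.d_model
# assert config.num_attention_heads == config.encoder_attention_heads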
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def solution(length: int = 50) -> int:
    # ways_number[i] holds the number of ways to fill a row of length i
    # with unit squares plus tiles of length 2, 3 and 4
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
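A small sanity check for the tiling count above: Project Euler problem 117 states that a row of length five admits exactly fifteen tilings, which this recurrence reproduces.

# Sanity check (value taken from the Project Euler 117 problem statement):
# assert solution(5) == 15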
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # NOTE: the original dump obscured the integer dtype here; int32 is assumed
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # clearing the least significant set bit counts one bit per iteration
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
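A minimal check that the two implementations agree; the expected count below is just the binary popcount of the sample value (25 = 0b11001 has three set bits):

# Quick agreement check (illustrative, not part of the original module):
# for n in (25, 37, 58, 0):
#     assert get_set_bits_count_using_brian_kernighans_algorithm(n) == get_set_bits_count_using_modulo_operator(n)
# assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3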
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
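A brief usage example for the recursive bubble sort above; the sort is in place and also returns the list:

# Example (illustrative):
# assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
# assert bubble_sort([]) == []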
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self
: Union[str, Any] ): A_ = DistilBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , dim=37 ) def __A ( self : Dict ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase ) def __A ( self : Any ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase ) def __A ( self : List[str] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase ) @slow def __A ( self : Optional[int] ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = DistilBertModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @slow @require_torch_gpu def __A ( self : Optional[int] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return A_ = True A_ = model_class(config=UpperCAmelCase ) A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = torch.jit.trace( UpperCAmelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase , os.path.join(UpperCAmelCase , "traced_model.pt" ) ) A_ = torch.jit.load(os.path.join(UpperCAmelCase , "traced_model.pt" ) , map_location=UpperCAmelCase ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase ) , inputs_dict["attention_mask"].to(UpperCAmelCase ) ) @require_torch class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Optional[int] ): A_ = DistilBertModel.from_pretrained("distilbert-base-uncased" ) A_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) A_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0] A_ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase ) A_ = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
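# The @require_torch_gpu test above round-trips a traced model through
# torch.jit.save / torch.jit.load. A minimal, self-contained sketch of that
# round-trip, using a toy module in place of DistilBert (names illustrative):
import os
import tempfile

import torch


class ToyModule(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        # stand-in for a real forward pass
        return input_ids * attention_mask


traced = torch.jit.trace(ToyModule(), (torch.ones(1, 4), torch.ones(1, 4)))
with tempfile.TemporaryDirectory() as tmp:
    torch.jit.save(traced, os.path.join(tmp, "traced_model.pt"))
    loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location="cpu")
    loaded(torch.ones(1, 4), torch.ones(1, 4))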
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ , A_ = image.size A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image[None].transpose(0 ,3 ,1 ,2 ) A_ = torch.from_numpy(__UpperCamelCase ) return 2.0 * image - 1.0 class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = 1 elif isinstance(UpperCAmelCase , torch.Tensor ): A_ = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' ) if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = preprocess(UpperCAmelCase ) A_ , A_ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image A_ = (batch_size, self.unet.config.in_channels // 2, height, width) A_ = next(self.unet.parameters() ).dtype A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) A_ = image.to(device=self.device , dtype=UpperCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCAmelCase , device=self.device ) A_ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta for t in self.progress_bar(UpperCAmelCase ): # concat latents and low resolution image in the channel dimension. 
A_ = torch.cat([latents, image] , dim=1 ) A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # decode the image latents with the VQVAE A_ = self.vqvae.decode(UpperCAmelCase ).sample A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 ) A_ = image / 2 + 0.5 A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
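# Worked check of the preprocess() helper defined at the top of the pipeline
# above: it snaps width/height down to multiples of 32, rescales pixels to
# [-1, 1], and returns a (1, 3, H, W) tensor. The same steps inlined:
import numpy as np
import PIL.Image
import torch

img = PIL.Image.new("RGB", (70, 45), color=(255, 255, 255))
w, h = (x - x % 32 for x in img.size)  # (70, 45) -> (64, 32)
img = img.resize((w, h), resample=PIL.Image.LANCZOS)
arr = np.array(img).astype(np.float32) / 255.0
tensor = 2.0 * torch.from_numpy(arr[None].transpose(0, 3, 1, 2)) - 1.0
print(tensor.shape)  # torch.Size([1, 3, 32, 64]); all-white input maps to 1.0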
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __a :Dict = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['pixel_values'] def __init__( self : Dict , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Dict[str, int]] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Optional[int] , ): super().__init__(**UpperCAmelCase ) A_ = size if size is not None else {"height": 224, "width": 224} A_ = get_size_dict(UpperCAmelCase ) A_ = crop_size if crop_size is not None else {"height": 224, "width": 224} A_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase , param_name="crop_size" ) A_ = do_resize A_ = do_rescale A_ = do_normalize A_ = do_center_crop A_ = crop_size A_ = size A_ = resample A_ = rescale_factor A_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN A_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __A ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ): A_ = get_size_dict(UpperCAmelCase ) if "shortest_edge" in size: A_ = get_resize_output_image_size(UpperCAmelCase , size=size["shortest_edge"] , default_to_square=UpperCAmelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: A_ = (size["height"], size["width"]) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ): A_ = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : np.ndarray , UpperCAmelCase : float , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Any ): return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ): return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Optional[int] , UpperCAmelCase : ImageInput , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : int = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase : str , ): A_ = do_resize if do_resize is not None else self.do_resize A_ = do_rescale if do_rescale is not None else self.do_rescale A_ = do_normalize if do_normalize is not None else self.do_normalize A_ = do_center_crop if do_center_crop is not None else self.do_center_crop A_ = crop_size if crop_size is not None else self.crop_size A_ = get_size_dict(UpperCAmelCase , param_name="crop_size" , default_to_square=UpperCAmelCase ) A_ = resample if resample is not None else self.resample A_ = rescale_factor if rescale_factor is not None else self.rescale_factor A_ = image_mean if image_mean is not None else self.image_mean A_ = image_std if image_std is not None else self.image_std A_ = size if size is not None else self.size A_ = get_size_dict(UpperCAmelCase ) if not is_batched(UpperCAmelCase ): A_ = [images] if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. A_ = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: A_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_center_crop: A_ = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images] if do_rescale: A_ = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: A_ = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] A_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] A_ = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
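# The preprocess() method above chains resize -> center_crop -> rescale ->
# normalize. A minimal numpy sketch of the last two steps; the mean/std
# values are the usual ImageNet defaults and are assumptions here, not read
# from this file:
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
image = image * (1 / 255)  # rescale, matching rescale_factor=1/255
mean = np.array([0.485, 0.456, 0.406])  # assumed IMAGENET_DEFAULT_MEAN
std = np.array([0.229, 0.224, 0.225])  # assumed IMAGENET_DEFAULT_STD
image = (image - mean) / std  # normalize, channels-last broadcasting
print(image.shape)  # (224, 224, 3)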
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(10_0000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared


# There are 2 chains: one ends at 89 (seeded via its member 58, which needs
# the fewest iterations to classify all members), the other ends at 1 and
# has 1 as its only member. So 58 and 1 are declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[57] = True  # 58 lies on the chain that ends at 89
CHAINS[0] = False  # 1 lies on the chain that ends at 1


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 1000_0000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # True marks the 89-terminated chain, so this counts the starting
    # numbers below `number` that arrive at 89.
    return CHAINS[:number].count(True)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
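# Worked example for the solution above: 86 -> 8**2 + 6**2 = 100 -> 1, so 86
# is on the chain that terminates at 1, while 85 -> 64 + 25 = 89 lands
# directly on the 89 chain.
print(next_number(86))  # 100
print(next_number(100))  # 1
print(next_number(85))  # 89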
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        images = [Image.fromarray(image) for image in images]
    return images
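# Quick sketch of the helpers above (using the function names restored in
# this cleanup): an all-zero "decoder output" in [-1, 1] maps to mid-gray
# pixels, since 0.5 * 255 rounds to 128.
import torch

batch = torch.zeros(1, 3, 8, 8)
pils = pt_to_pil(batch)
print(pils[0].size, pils[0].mode)  # (8, 8) RGB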
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :List[Any] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __a :List[Any] = get_logger() __a :Optional[dict] = None class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): """simple docstring""" def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ): super().__init__(features=UpperCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) A_ = str(jax.devices()[0] ) A_ = jnp_array_kwargs @staticmethod def __A ( ): import jax return {str(UpperCAmelCase ): device for device in jax.devices()} def __A ( self : Optional[int] , UpperCAmelCase : int ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , UpperCAmelCase ) and column: if all( isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase , axis=0 ) return column def __A ( self : List[str] , UpperCAmelCase : str ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ = {} if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A_ = {"dtype": jnp.intaa} else: A_ = {"dtype": jnp.intaa} elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = np.asarray(UpperCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return 
jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def __A ( self : Any , UpperCAmelCase : Dict ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ): A_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase ) A_ = self.python_features_decoder.decode_row(UpperCAmelCase ) return self.recursive_tensorize(UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase ) A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] ) A_ = self.recursive_tensorize(UpperCAmelCase ) A_ = self._consolidate(UpperCAmelCase ) return column def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase ) A_ = self.python_features_decoder.decode_batch(UpperCAmelCase ) A_ = self.recursive_tensorize(UpperCAmelCase ) for column_name in batch: A_ = self._consolidate(batch[column_name] ) return batch
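# Hedged usage sketch: this formatter is what backs Dataset.with_format("jax")
# in recent versions of `datasets` (assumes jax is installed; the exact
# version support is an assumption, not stated in this file):
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
print(type(ds[0]["x"]))  # a jax.Array, produced via recursive_tensorize above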
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __a :str = logging.get_logger('transformers.models.speecht5') __a :Optional[Any] = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } __a :Dict = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } __a :Any = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } __a :int = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } __a :Dict = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } __a :Union[str, Any] = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } __a :int = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 
'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } __a :List[str] = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } __a :List[str] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __a :str = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __a :Any = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __a :Optional[int] = [] __a :List[str] = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] __a :Union[str, Any] = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] __a :str = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] __a :Dict = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : str ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value elif weight_type == "running_mean": A_ = value elif weight_type == "running_var": A_ = value elif weight_type == "num_batches_tracked": A_ = value else: A_ = value logger.info(f'''{key + ("." 
+ weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ): """simple docstring""" for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_ , A_ = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = [] if task == "s2t": A_ = hf_model.speechta.encoder.prenet.feature_encoder A_ = MAPPING_S2T A_ = IGNORE_KEYS_S2T elif task == "t2s": A_ = None A_ = MAPPING_T2S A_ = IGNORE_KEYS_T2S elif task == "s2s": A_ = hf_model.speechta.encoder.prenet.feature_encoder A_ = MAPPING_S2S A_ = IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__UpperCamelCase ,__UpperCamelCase ): logger.info(f'''{name} was ignored''' ) continue A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A_ , A_ = key.split(".*." ) if prefix in name and suffix in name: A_ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." )[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: A_ = "weight" elif "running_mean" in name: A_ = "running_mean" elif "running_var" in name: A_ = "running_var" elif "num_batches_tracked" in name: A_ = "num_batches_tracked" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." 
) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : str ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any]=None ,__UpperCamelCase : List[Any]=None ,__UpperCamelCase : Tuple=None ,): """simple docstring""" if config_path is not None: A_ = SpeechTaConfig.from_pretrained(__UpperCamelCase ) else: A_ = SpeechTaConfig() if task == "s2t": A_ = config.max_text_positions A_ = SpeechTaForSpeechToText(__UpperCamelCase ) elif task == "t2s": A_ = 1876 A_ = 600 A_ = config.max_speech_positions A_ = SpeechTaForTextToSpeech(__UpperCamelCase ) elif task == "s2s": A_ = 1876 A_ = config.max_speech_positions A_ = SpeechTaForSpeechToSpeech(__UpperCamelCase ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: A_ = SpeechTaTokenizer(__UpperCamelCase ,model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it A_ = AddedToken("<mask>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) A_ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) A_ = SpeechTaFeatureExtractor() A_ = SpeechTaProcessor(tokenizer=__UpperCamelCase ,feature_extractor=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) A_ = torch.load(__UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["model"] ,__UpperCamelCase ,__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(__UpperCamelCase ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": __a :Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. 
Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) __a :int = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
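# Hedged example of calling the conversion entry point directly, mirroring
# the argparse-driven call at the bottom of the script (all paths are
# placeholders):
convert_speechta_checkpoint(
    "t2s",                             # task
    "/path/to/fairseq_checkpoint.pt",  # checkpoint_path
    "./speecht5_tts",                  # pytorch_dump_folder_path
    None,                              # config_path: fall back to SpeechTaConfig()
    "/path/to/sentencepiece.model",    # vocab_path
    None,                              # push_to_hub: skip uploading
)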
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __a :Any = logging.getLogger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ): super().__init__( UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , ) A_ = None def __A ( self : Dict , UpperCAmelCase : int ): logger.info("initializing retrieval" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized" ) # needs to be set manually A_ = self._infer_socket_ifname() # avoid clash with the NCCL port A_ = str(distributed_port + 1 ) A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __A ( self : List[str] ): return dist.get_rank(group=self.process_group ) == 0 def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ): A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase ) dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group ) return target_tensor def __A ( self : Any ): A_ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase ) return ifname def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ): # single GPU training if not dist.is_initialized(): A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase ) # distributed training A_ = dist.get_world_size(group=self.process_group ) # gather logic A_ = None if self._is_main(): A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )] dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group ) # scatter logic A_ = question_hidden_states.shape[0] A_ = [] A_ = [] if self._is_main(): assert len(UpperCAmelCase ) == world_size A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase ) A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase )
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _a ( snake_case_ ): """simple docstring""" def __A ( self : int ): A_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCAmelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(UpperCAmelCase , "num_attention_heads" ) ) class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=13 , UpperCAmelCase : Any=64 , UpperCAmelCase : Any=3 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : int=2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[str]=[128, 256, 384] , UpperCAmelCase : List[Any]=[4, 6, 8] , UpperCAmelCase : Any=[2, 3, 4] , UpperCAmelCase : Dict=[16, 16, 16] , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : str=[2, 2, 2] , UpperCAmelCase : int=[2, 2, 2] , UpperCAmelCase : Any=0.02 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Tuple=2 , ): A_ = parent A_ = batch_size A_ = image_size A_ = num_channels A_ = kernel_size A_ = stride A_ = padding A_ = hidden_sizes A_ = num_attention_heads A_ = depths A_ = key_dim A_ = drop_path_rate A_ = patch_size A_ = attention_ratio A_ = mlp_ratio A_ = initializer_range A_ = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] A_ = is_training A_ = use_labels A_ = num_labels A_ = initializer_range def __A ( self : List[str] ): A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.num_labels ) A_ = self.get_config() return config, pixel_values, labels def __A ( self : Dict ): return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def __A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Tuple ): A_ = LevitModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) A_ = (self.image_size, self.image_size) A_ , A_ = image_size[0], image_size[1] for _ in range(4 ): A_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) A_ = floor(((width + 2 * self.padding - self.kernel_size) / 
self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ): A_ = self.num_labels A_ = LevitForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : str ): A_ = self.prepare_config_and_inputs() A_ , A_ , A_ = config_and_inputs A_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) _lowerCamelCase : int = ( { 'feature-extraction': LevitModel, 'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) _lowerCamelCase : int = False _lowerCamelCase : Any = False _lowerCamelCase : Any = False _lowerCamelCase : Dict = False _lowerCamelCase : Dict = False def __A ( self : int ): A_ = LevitModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def __A ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self : List[str] ): return @unittest.skip(reason="Levit does not use inputs_embeds" ) def __A ( self : Dict ): pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def __A ( self : str ): pass @unittest.skip(reason="Levit does not output attentions" ) def __A ( self : Dict ): pass def __A ( self : str ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(UpperCAmelCase ) A_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ = [*signature.parameters.keys()] A_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def __A ( self : Dict ): def check_hidden_states_output(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ): A_ = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): A_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = outputs.hidden_states A_ = len(self.model_tester.depths ) + 1 self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) A_ = (self.model_tester.image_size, self.model_tester.image_size) A_ , A_ = image_size[0], image_size[1] for _ in range(4 ): A_ = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) A_ = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] 
, ) A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __A ( self : Tuple ): pass def __A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict=False ): A_ = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase ) def __A ( self : Tuple ): if not self.model_tester.is_training: return A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCAmelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue A_ = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(**UpperCAmelCase ).loss loss.backward() def __A ( self : Union[str, Any] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A_ = False A_ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCAmelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue A_ = model_class(UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(UpperCAmelCase ) model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(**UpperCAmelCase ).loss loss.backward() def __A ( self : Dict ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCAmelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ): A_ = problem_type["title"] A_ = problem_type["num_labels"] A_ = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) if problem_type["num_labels"] > 1: A_ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A_ = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size 
that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCAmelCase ) as warning_list: A_ = model(**UpperCAmelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __A ( self : Optional[Any] ): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = LevitModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __snake_case ( ): """simple docstring""" A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _a ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : Any ): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __A ( self : Any ): A_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( UpperCAmelCase ) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(**UpperCAmelCase ) # verify the logits A_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) A_ = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )
from jiwer import compute_measures import datasets __a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' __a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def __A ( self : Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ): if concatenate_texts: return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"] else: A_ = 0 A_ = 0 for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ): A_ = compute_measures(UpperCAmelCase , UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + 
measures["hits"] return incorrect / total
import warnings from ..trainer import Trainer from ..utils import logging __a :Union[str, Any] = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : int , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Union[str, Any] ): warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase , ) super().__init__(args=UpperCAmelCase , **UpperCAmelCase )
class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ): A_ = None A_ = None A_ = graph self._normalize_graph(UpperCAmelCase , UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = None def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ): if sources is int: A_ = [sources] if sinks is int: A_ = [sinks] if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0: return A_ = sources[0] A_ = sinks[0] # make fake vertex if there are more # than one source or sink if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1: A_ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A_ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A_ = max_input_flow A_ = 0 A_ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A_ = max_input_flow A_ = size - 1 def __A ( self : str ): if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __A ( self : Tuple , UpperCAmelCase : List[Any] ): A_ = algorithm(self ) class _a : """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : List[str] ): A_ = flow_network A_ = flow_network.verticesCount A_ = flow_network.sourceIndex A_ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A_ = flow_network.graph A_ = False def __A ( self : Optional[int] ): if not self.executed: self._algorithm() A_ = True def __A ( self : Dict ): pass class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ): super().__init__(UpperCAmelCase ) # use this to save your result A_ = -1 def __A ( self : Tuple ): if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ): super().__init__(UpperCAmelCase ) A_ = [[0] * self.verticies_count for i in range(self.verticies_count )] A_ = [0] * self.verticies_count A_ = [0] * self.verticies_count def __A ( self : List[str] ): A_ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A_ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A_ = 0 while i < len(UpperCAmelCase ): A_ = vertices_list[i] A_ = self.heights[vertex_index] self.process_vertex(UpperCAmelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) ) A_ = 0 else: i += 1 A_ = sum(self.preflow[self.source_index] ) def __A ( self : List[str] , UpperCAmelCase : Dict ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(UpperCAmelCase , UpperCAmelCase ) self.relabel(UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ): A_ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A_ = self.heights[to_index] if min_height is not None: A_ = min_height + 1 if __name__ == "__main__": __a :Tuple = [0] __a :Tuple = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __a :List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __a :List[Any] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
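# A minimal sketch of Edmonds-Karp (BFS augmenting paths), included here only as an
# independent cross-check of the push-relabel result above on the same network;
# standard library only. For this graph the maximum flow from vertex 0 to vertex 3
# is 6 (the single path 0 -> 1 -> 2 -> 3 is bottlenecked by the capacity-6 edge).
from collections import deque


def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    residual = [row[:] for row in capacity]  # residual capacities
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: done
            return max_flow
        # find the bottleneck capacity along the path, then augment
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        max_flow += bottleneck


print(edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6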
from collections import deque def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = len(__UpperCamelCase ) A_ = deque() A_ = [False for _ in range(__UpperCamelCase )] A_ = [-1 for _ in range(__UpperCamelCase )] A_ = index_of[:] def strong_connect(__UpperCamelCase : List[str] ,__UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ): A_ = index # the number when this node is seen A_ = index # lowest rank node reachable from here index += 1 stack.append(__UpperCamelCase ) A_ = True for w in g[v]: if index_of[w] == -1: A_ = strong_connect(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: A_ = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: A_ = [] A_ = stack.pop() A_ = False component.append(__UpperCamelCase ) while w != v: A_ = stack.pop() A_ = False component.append(__UpperCamelCase ) components.append(__UpperCamelCase ) return index A_ = [] for v in range(__UpperCamelCase ): if index_of[v] == -1: strong_connect(__UpperCamelCase ,0 ,__UpperCamelCase ) return components def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ): """simple docstring""" A_ = [[] for _ in range(__UpperCamelCase )] for u, v in edges: g[u].append(__UpperCamelCase ) return g if __name__ == "__main__": # Test __a :Optional[int] = 7 __a :Optional[int] = [0, 0, 1, 2, 3, 3, 4, 4, 6] __a :str = [1, 3, 2, 0, 1, 4, 5, 6, 5] __a :str = [(u, v) for u, v in zip(source, target)] __a :int = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
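# A small follow-up sketch: collapsing each strongly connected component returned by
# Tarjan's algorithm into a single node yields the condensation, which is always a DAG.
# This assumes the `tarjan`, `create_graph`, `n_vertices`, and `edges` names from the
# test block above.
def condensation(n_vertices, edges):
    components = tarjan(create_graph(n_vertices, edges))
    component_of = {v: i for i, comp in enumerate(components) for v in comp}
    dag = [set() for _ in components]
    for u, v in edges:
        if component_of[u] != component_of[v]:
            dag[component_of[u]].add(component_of[v])
    return dag


print(condensation(n_vertices, edges))
# [set(), {0}, {0, 1}, {2}] for the test graph above: every edge of the
# condensation goes between distinct components, so no cycles remain.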
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :str = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
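# A minimal sketch of the lazy-import pattern used by the init above: attribute access
# on the module triggers the real import. This is a simplified illustration, not the
# actual `_LazyModule` implementation from `transformers.utils`.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # called only when normal attribute lookup fails, i.e. on first access
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {symbol}")
        submodule = importlib.import_module(
            f"{self.__name__}.{self._symbol_to_module[symbol]}"
        )
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value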
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __a :List[Any] = get_logger() __a :Optional[dict] = None class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): """simple docstring""" def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ): super().__init__(features=UpperCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) A_ = str(jax.devices()[0] ) A_ = jnp_array_kwargs @staticmethod def __A ( ): import jax return {str(UpperCAmelCase ): device for device in jax.devices()} def __A ( self : Optional[int] , UpperCAmelCase : int ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , UpperCAmelCase ) and column: if all( isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase , axis=0 ) return column def __A ( self : List[str] , UpperCAmelCase : str ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ = {} if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A_ = {"dtype": jnp.intaa} else: A_ = {"dtype": jnp.intaa} elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = np.asarray(UpperCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return 
jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def __A ( self : Any , UpperCAmelCase : Dict ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ): A_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase ) A_ = self.python_features_decoder.decode_row(UpperCAmelCase ) return self.recursive_tensorize(UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase ) A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] ) A_ = self.recursive_tensorize(UpperCAmelCase ) A_ = self._consolidate(UpperCAmelCase ) return column def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase ) A_ = self.python_features_decoder.decode_batch(UpperCAmelCase ) A_ = self.recursive_tensorize(UpperCAmelCase ) for column_name in batch: A_ = self._consolidate(batch[column_name] ) return batch
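# A usage sketch for the formatter above (assuming `datasets` and `jax` are installed):
# `with_format("jax")` routes row/column/batch extraction through JaxFormatter, so
# indexing returns `jax.Array` values instead of plain Python lists. Passing `device`
# as a format kwarg should be forwarded to the formatter as shown in its __init__.
import jax
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("jax", device=str(jax.devices()[0]))  # device is optional

print(type(ds[0]["x"]))  # a jax.Array
print(ds["y"])           # the whole column, consolidated into one stacked jax array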
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A_ = f'''{src_lang}-{tgt_lang}''' A_ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = os.path.join(__UpperCamelCase ,"README.md" ) print(f'''Generating {path}''' ) with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f: f.write(__UpperCamelCase ) # make sure we are under the root of the project __a :Optional[Any] = Path(__file__).resolve().parent.parent.parent __a :Optional[Any] = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __a , __a , __a :int = model_name.split('-') __a :str = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from __future__ import annotations def __snake_case ( __UpperCamelCase : list[int] ): # This function is recursive """simple docstring""" A_ = len(__UpperCamelCase ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else A_ = array[0] A_ = False A_ = 1 A_ = [] while not is_found and i < array_length: if array[i] < pivot: A_ = True A_ = [element for element in array[i:] if element >= array[i]] A_ = longest_subsequence(__UpperCamelCase ) if len(__UpperCamelCase ) > len(__UpperCamelCase ): A_ = temp_array else: i += 1 A_ = [element for element in array[1:] if element >= pivot] A_ = [pivot, *longest_subsequence(__UpperCamelCase )] if len(__UpperCamelCase ) > len(__UpperCamelCase ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
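# The recursive version above re-explores subproblems and can take exponential time;
# a standard O(n^2) dynamic-programming sketch for the same task (a longest
# non-decreasing subsequence), added here for comparison:
def longest_subsequence_dp(array):
    if not array:
        return []
    n = len(array)
    # best[i] = a longest non-decreasing subsequence ending at index i
    best = [[array[i]] for i in range(n)]
    for i in range(1, n):
        for j in range(i):
            if array[j] <= array[i] and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [array[i]]
    return max(best, key=len)


print(longest_subsequence_dp([10, 22, 9, 33, 21, 50, 41, 60, 80]))
# [10, 22, 33, 50, 60, 80]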
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : str = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['torch', 'transformers', 'onnx'] def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['torch', 'transformers', 'onnx'] def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : 
int ): requires_backends(cls , ["torch", "transformers", "onnx"] )
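# A minimal sketch of the placeholder pattern above: when an optional backend is not
# installed, a dummy class is exposed in place of the real one, and any attempt to use
# it raises an informative error. Simplified relative to the real `DummyObject` /
# `requires_backends` utilities; the names below are illustrative only.
import importlib.util


def requires_backends_sketch(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that were not found: {missing}")


class DummyPipeline:
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        # fails fast with a clear message instead of an opaque AttributeError later
        requires_backends_sketch(self, self._backends)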
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[Any]=99 , UpperCAmelCase : List[str]=32 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Dict=16 , UpperCAmelCase : int=2 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : List[str]=False , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple="None" , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : str=4 , UpperCAmelCase : List[str]=None , ): A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_input_mask A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = num_labels A_ = num_choices A_ = relative_attention A_ = position_biased_input A_ = pos_att_type A_ = scope def __A ( self : Tuple ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[int] ): return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __A ( self : Optional[int] , UpperCAmelCase : Tuple ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __A ( self : Tuple 
, UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ): A_ = DebertaVaModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )[0] A_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )[0] A_ = model(UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict ): A_ = DebertaVaForMaskedLM(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : int , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ): A_ = self.num_labels A_ = DebertaVaForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ): A_ = self.num_labels A_ = DebertaVaForTokenClassification(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ): A_ = DebertaVaForQuestionAnswering(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model( UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : int ): A_ = DebertaVaForMultipleChoice(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A_ = model( UpperCAmelCase , attention_mask=UpperCAmelCase , 
token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase : List[str] = ( { 'feature-extraction': DebertaVaModel, 'fill-mask': DebertaVaForMaskedLM, 'question-answering': DebertaVaForQuestionAnswering, 'text-classification': DebertaVaForSequenceClassification, 'token-classification': DebertaVaForTokenClassification, 'zero-shot': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase : Dict = True _lowerCamelCase : Any = False _lowerCamelCase : str = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Optional[Any] = False def __A ( self : Dict ): A_ = DebertaVaModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : Union[str, Any] ): self.config_tester.run_common_tests() def __A ( self : List[str] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*UpperCAmelCase ) def __A ( self : Union[str, Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase ) def __A ( self : Any ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase ) def __A ( self : Any ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase ) def __A ( self : str ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCAmelCase ) @slow def __A ( self : Optional[int] ): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = DebertaVaModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class _a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="Model not available yet" ) def __A ( self : Dict ): pass @slow def __A ( self : Tuple ): A_ = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" ) A_ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) A_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0] # compare the actual values for a slice. A_ = torch.tensor( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,) def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ): A_ = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase ) return config def __A ( self : Optional[Any] ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def __A ( self : Dict ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase ) def __A ( self : int ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase ) def __A ( self : Tuple ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase ) def __A ( self : int ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase ) def __A ( self : Union[str, Any] ): self.check_over_configs(thresholding=UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , ) def __A ( self : Optional[int] ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def __A ( self : Tuple ): for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = self.dummy_sample_deter + 0.1 A_ = self.dummy_sample_deter - 0.1 A_ = samplea.shape[0] A_ = torch.stack([samplea, samplea, samplea] , dim=0 ) A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase ) A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2 assert abs(result_mean.item() - 0.5_005 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config(prediction_type="v_prediction" ) A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def __A ( self : Union[str, Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase ) A_ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase ): if i == len(UpperCAmelCase ) - 1: A_ = -1 else: A_ = timesteps[i + 1] A_ = scheduler.previous_timestep(UpperCAmelCase ) A_ = prev_t.item() self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] A_ = len(UpperCAmelCase ) with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase )
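# A sketch of the update that `scheduler.step` performs in the sampling loops above for
# prediction_type="epsilon" (DDPM, Ho et al. 2020), assuming the linear beta schedule
# and "fixed_small" variance from get_scheduler_config:
#   x_{t-1} = (x_t - beta_t / sqrt(1 - alpha_bar_t) * eps_hat) / sqrt(alpha_t) + sigma_t * z
import torch

num_train_timesteps = 1000
betas = torch.linspace(0.0001, 0.02, num_train_timesteps)
alphas = 1.0 - betas
alphas_bar = torch.cumprod(alphas, dim=0)


def ddpm_step(eps_hat, t, x_t):
    # posterior mean of x_{t-1} given x_t and the predicted noise
    mean = (x_t - betas[t] / torch.sqrt(1 - alphas_bar[t]) * eps_hat) / torch.sqrt(alphas[t])
    if t == 0:
        return mean  # no noise is added at the final step
    # "fixed_small" variance: beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
    variance = betas[t] * (1 - alphas_bar[t - 1]) / (1 - alphas_bar[t])
    return mean + torch.sqrt(variance) * torch.randn_like(x_t)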
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Tuple=0 ): """simple docstring""" if name is None: A_ = None else: A_ = "." * max(0 ,spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}" A_ = fmt.format(__UpperCamelCase ) # Print and recurse (if needed). if isinstance(__UpperCamelCase ,__UpperCamelCase ): if msg is not None: print(__UpperCamelCase ) for k in val.keys(): recursive_print(__UpperCamelCase ,val[k] ,spaces + 2 ) elif isinstance(__UpperCamelCase ,torch.Tensor ): print(__UpperCamelCase ,":" ,val.size() ) else: print(__UpperCamelCase ,":" ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Any ): """simple docstring""" A_ = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] A_ = (num_heads, hidden_size, num_splits) + input_shape[1:] A_ = param.view(*__UpperCamelCase ) A_ = param.transpose(0 ,2 ) A_ = param.transpose(1 ,2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] A_ = (num_heads, num_splits, hidden_size) + input_shape[1:] A_ = param.view(*__UpperCamelCase ) A_ = param.transpose(0 ,1 ).contiguous() A_ = param.view(*__UpperCamelCase ) return param def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any ,__UpperCamelCase : Any ): """simple docstring""" A_ = {} # old versions did not store training args A_ = input_state_dict.get("args" ,__UpperCamelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) A_ = ds_args.padded_vocab_size A_ = ds_args.max_position_embeddings A_ = ds_args.hidden_size A_ = ds_args.num_layers A_ = ds_args.num_attention_heads A_ = ds_args.ffn_hidden_size # pprint(config) # The number of heads. A_ = config.n_head # The hidden_size per head. 
A_ = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): A_ = input_state_dict["checkpoint_version"] else: A_ = 0.0 # The model. A_ = input_state_dict["model"] # The language model. A_ = model["language_model"] # The embeddings. A_ = lm["embedding"] # The word embeddings. A_ = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. A_ = word_embeddings[: config.vocab_size, :] A_ = word_embeddings # The position embeddings. A_ = embeddings["position_embeddings"]["weight"] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] A_ = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. A_ = pos_embeddings # The transformer. A_ = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. A_ = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" ) # The simple map of names for "automated" rules. A_ = { "attention.dense": ".attn.c_proj.", "self_attention.dense": ".attn.c_proj.", "mlp.dense_h_to_4h": ".mlp.c_fc.", "mlp.dense_4h_to_h": ".mlp.c_proj.", } # Extract the layers. for key, val in transformer.items(): # Match the name. A_ = layer_re.match(__UpperCamelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. A_ = int(m.group(1 ) ) # The name of the operation. A_ = m.group(2 ) # Is it a weight or a bias? A_ = m.group(3 ) # The name of the layer. A_ = f'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm" ): A_ = "ln_1" if op_name.startswith("input" ) else "ln_2" A_ = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. A_ = torch.tril(torch.ones((n_positions, n_positions) ,dtype=torch.floataa ) ).view( 1 ,1 ,__UpperCamelCase ,__UpperCamelCase ) A_ = causal_mask # Insert a "dummy" tensor for masked_bias. A_ = torch.tensor(-1E4 ,dtype=torch.floataa ) A_ = masked_bias A_ = fix_query_key_value_ordering(__UpperCamelCase ,__UpperCamelCase ,3 ,__UpperCamelCase ,__UpperCamelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. A_ = out_val.transpose(0 ,1 ).contiguous() # Store. A_ = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": A_ = fix_query_key_value_ordering(__UpperCamelCase ,__UpperCamelCase ,3 ,__UpperCamelCase ,__UpperCamelCase ) # Store. No change of shape. A_ = out_val # Transpose the weights. elif weight_or_bias == "weight": A_ = megatron_to_transformers[op_name] A_ = val.transpose(0 ,1 ) # Copy the bias. elif weight_or_bias == "bias": A_ = megatron_to_transformers[op_name] A_ = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. A_ = transformer["final_layernorm.weight"] A_ = transformer["final_layernorm.bias"] # For LM head, transformers' wants the matrix to weight embeddings. A_ = word_embeddings # It should be done! 
return output_state_dict def __snake_case ( ): """simple docstring""" A_ = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure" ,action="store_true" ) parser.add_argument( "path_to_checkpoint" ,type=__UpperCamelCase ,help="Path to the checkpoint file (.zip archive or direct .pt file)" ,) parser.add_argument( "--config_file" ,default="" ,type=__UpperCamelCase ,help="An optional config json file describing the pre-trained model." ,) A_ = parser.parse_args() # Extract the basename. A_ = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith(".zip" ): with zipfile.ZipFile(args.path_to_checkpoint ,"r" ) as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict: A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) else: A_ = torch.load(args.path_to_checkpoint ,map_location="cpu" ) A_ = input_state_dict.get("args" ,__UpperCamelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: A_ = "gelu_fast" elif ds_args.openai_gelu: A_ = "gelu_new" else: A_ = "gelu" else: # in the very early days this used to be "gelu_new" A_ = "gelu_new" # Spell out all parameters in case the defaults change. A_ = GPTaConfig( vocab_size=5_0257 ,n_positions=1024 ,n_embd=1024 ,n_layer=24 ,n_head=16 ,n_inner=4096 ,activation_function=__UpperCamelCase ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,summary_type="cls_index" ,summary_use_proj=__UpperCamelCase ,summary_activation=__UpperCamelCase ,summary_proj_to_labels=__UpperCamelCase ,summary_first_dropout=0.1 ,scale_attn_weights=__UpperCamelCase ,use_cache=__UpperCamelCase ,bos_token_id=5_0256 ,eos_token_id=5_0256 ,) else: A_ = GPTaConfig.from_json_file(args.config_file ) A_ = ["GPT2LMHeadModel"] # Convert. print("Converting" ) A_ = convert_megatron_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__UpperCamelCase ,__UpperCamelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: A_ = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": A_ = "gpt2" elif tokenizer_type == "PretrainedFromHF": A_ = ds_args.tokenizer_name_or_path else: raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: A_ = "gpt2" A_ = AutoTokenizer.from_pretrained(__UpperCamelCase ) A_ = type(__UpperCamelCase ).__name__ A_ = tokenizer_class # Store the config to file. print("Saving config" ) config.save_pretrained(__UpperCamelCase ) # Save tokenizer based on args print(f'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(__UpperCamelCase ) # Store the state_dict to file. A_ = os.path.join(__UpperCamelCase ,"pytorch_model.bin" ) print(f'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(__UpperCamelCase ,__UpperCamelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
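# A tiny worked example of the checkpoint_version >= 2.0 branch of
# fix_query_key_value_ordering above: Megatron stores the fused QKV weight head-major
# as [h0_q, h0_k, h0_v, h1_q, h1_k, h1_v]; the view/transpose reorders it split-major
# as [h0_q, h1_q, h0_k, h1_k, h0_v, h1_v], i.e. all query rows first, then keys, then
# values, which is the layout the GPT-2 attention expects.
import torch

num_heads, num_splits, hidden_size_per_head = 2, 3, 1
param = torch.arange(num_heads * num_splits * hidden_size_per_head)
# rows 0..2 belong to head 0 (its q, k, v); rows 3..5 to head 1

reordered = (
    param.view(num_heads, num_splits, hidden_size_per_head)
    .transpose(0, 1)
    .contiguous()
    .view(param.size())
)
print(param.tolist())      # [0, 1, 2, 3, 4, 5]
print(reordered.tolist())  # [0, 3, 1, 4, 2, 5] -> q rows first, then k, then v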
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ): """simple docstring""" with open(__UpperCamelCase ) as metadata_file: A_ = json.load(__UpperCamelCase ) A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) # Load the entity vocab file A_ = load_entity_vocab(__UpperCamelCase ) A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ) # Initialize the embeddings of the special tokens A_ = state_dict["embeddings.word_embeddings.weight"] A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) A_ = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: A_ = f'''encoder.layer.{layer_index}.attention.self.''' A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks A_ = state_dict["entity_embeddings.entity_embeddings.weight"] A_ = entity_emb[entity_vocab["[MASK]"]] A_ = LukeModel(config=__UpperCamelCase ).eval() A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' ) # Check outputs A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" ) A_ = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." 
) A_ = (39, 42) A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" ) A_ = model(**__UpperCamelCase ) # Verify word hidden states if model_size == "large": A_ = torch.Size((1, 42, 1024) ) A_ = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base A_ = torch.Size((1, 42, 768) ) A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": A_ = torch.Size((1, 1, 1024) ) A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base A_ = torch.Size((1, 1, 768) ) A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__UpperCamelCase ) ) model.save_pretrained(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = {} with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: for index, line in enumerate(__UpperCamelCase ): A_ , A_ = line.rstrip().split("\t" ) A_ = index return entity_vocab if __name__ == "__main__": __a :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) __a :Tuple = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __a :Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = XGLMTokenizer _lowerCamelCase : Union[str, Any] = XGLMTokenizerFast _lowerCamelCase : Dict = True _lowerCamelCase : Optional[Any] = True def __A ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing A_ = XGLMTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : Dict ): A_ = "<pad>" A_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : Optional[int] ): A_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(UpperCAmelCase ) , 1008 ) def __A ( self : int ): self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def __A ( self : Dict ): A_ = XGLMTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase ) A_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __A ( self : List[Any] ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def __A ( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCAmelCase , f.name ) A_ = XGLMTokenizer(f.name , keep_accents=UpperCAmelCase ) A_ = pickle.dumps(UpperCAmelCase ) pickle.loads(UpperCAmelCase ) def __A ( self : str ): if not self.test_rust_tokenizer: return A_ = self.get_tokenizer() A_ = self.get_rust_tokenizer() A_ = "I was born in 92000, and this is falsé." 
A_ = tokenizer.tokenize(UpperCAmelCase ) A_ = rust_tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = self.get_rust_tokenizer() A_ = tokenizer.encode(UpperCAmelCase ) A_ = rust_tokenizer.encode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : Optional[int] ): A_ = "Hello World!" A_ = [2, 31227, 4447, 35] self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) ) @slow def __A ( self : Any ): A_ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off A_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) ) @slow def __A ( self : Any ): # fmt: off A_ = { "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="facebook/xglm-564M" , padding=UpperCAmelCase , )
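# --- Illustrative sketch (not part of the original test file) ---
# The id assertions above add ``tokenizer.fairseq_offset`` to every raw
# SentencePiece id because XGLM reserves the first ids for control tokens.
# The offset of 1 below is an assumption for illustration only.
fairseq_offset = 1
spm_ids = [285, 46, 10, 170, 382]
print([i + fairseq_offset for i in spm_ids])  # [286, 47, 11, 171, 383]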
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __a :Optional[Any] = 'true' def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ): """simple docstring""" set_seed(42 ) A_ = RegressionModel() A_ = deepcopy(__UpperCamelCase ) A_ = RegressionDataset(length=__UpperCamelCase ) A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase ) model.to(accelerator.device ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return model, ddp_model, dataloader def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ): """simple docstring""" A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) A_ = load_dataset("glue" ,"mrpc" ,split="validation" ) def tokenize_function(__UpperCamelCase : Optional[Any] ): A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ) return outputs with accelerator.main_process_first(): A_ = dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,) A_ = tokenized_datasets.rename_column("label" ,"labels" ) def collate_fn(__UpperCamelCase : Union[str, Any] ): if use_longest: return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" ) return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" ) return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ): """simple docstring""" A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase ) A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches ) A_ = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] for batch in dataloader: A_ , A_ = batch.values() with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ , A_ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A_ , A_ = [], [] for logit, targ in logits_and_targets: logits.append(__UpperCamelCase ) targs.append(__UpperCamelCase ) A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase ) return logits, targs def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ): """simple docstring""" A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) assert ( len(__UpperCamelCase ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}''' def __snake_case ( 
__UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ): """simple docstring""" A_ = evaluate.load("glue" ,"mrpc" ) A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase ) # First do baseline A_ , A_ , A_ = setup["no"] model.to(__UpperCamelCase ) model.eval() for batch in dataloader: batch.to(__UpperCamelCase ) with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] ) A_ = metric.compute() # Then do distributed A_ , A_ , A_ = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) A_ = batch["labels"] A_ , A_ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase ) A_ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def __snake_case ( ): """simple docstring""" A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(__UpperCamelCase ,__UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(__UpperCamelCase ,99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) A_ = Accelerator() test_torch_metrics(__UpperCamelCase ,512 ) accelerator.state._reset_state() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
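# --- Illustrative sketch (not part of the original script) ---
# What gather_for_metrics corrects for: with uneven sharding the distributed
# DataLoader pads the last batch by repeating samples, so a plain gather
# over-counts. The process count and sample count below are made up.
world_size, num_samples = 2, 81
per_process = -(-num_samples // world_size)  # ceil division -> 41
padded_total = per_process * world_size      # 82 gathered predictions
print(padded_total - num_samples, "duplicated sample(s) dropped by gather_for_metrics")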
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl: list, wt: list, w: int, n: int) -> float:
    """
    Fractional knapsack: items may be taken partially, so sort by value/weight
    ratio and fill the capacity greedily.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
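# --- Example run (hypothetical items, not part of the original file) ---
# Values [60, 100, 120] with weights [10, 20, 30] and capacity 50: the first
# two items are taken whole and 20/30 of the third, 60 + 100 + 80 = 240.
print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0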
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __a :Optional[Any] = 'src/transformers' __a :Tuple = 'docs/source/en/tasks' def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.readlines() # Find the start prompt. A_ = 0 while not lines[start_index].startswith(__UpperCamelCase ): start_index += 1 start_index += 1 A_ = start_index while not lines[end_index].startswith(__UpperCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __a :List[str] = direct_transformers_import(TRANSFORMERS_PATH) __a :Optional[Any] = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__a :Optional[Any] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = TASK_GUIDE_TO_MODELS[task_guide] A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() ) A_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ , A_ , A_ , A_ = _find_text_in_file( filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,) A_ = get_model_list_for_task(__UpperCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' " to fix this." ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a :Optional[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
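# --- Illustrative sketch (not part of the original script) ---
# The extraction helper above slices out the region between a start prompt
# and an end prompt. The markers and content below are made up to show the
# mechanism.
lines = ["intro\n", "<!--start-->\n", "model A\n", "model B\n", "<!--end-->\n"]
start = next(i for i, l in enumerate(lines) if l.startswith("<!--start-->")) + 1
end = next(i for i, l in enumerate(lines) if l.startswith("<!--end-->"))
print("".join(lines[start:end]), end="")  # model A / model B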
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in ``n``."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
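# --- Verification sketch (not part of the original file) ---
# Brute force over every window, shown with window size 3 on a short made-up
# digit string; the file above runs the same search with window size 13 but
# skips ahead greedily instead of re-scanning every position.
digits = "3675356291"
window = 3
best = 0
for i in range(len(digits) - window + 1):
    product = 1
    for d in digits[i : i + window]:
        product *= int(d)
    best = max(best, product)
assert best == 210  # 6 * 7 * 5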
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __a :Dict = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A_ = "" else: A_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[ : config.hidden_size, : ] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[ -config.hidden_size :, : ] A_ = in_proj_bias[-config.hidden_size :] def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = dct.pop(__UpperCamelCase ) A_ = val def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = ViTConfig() A_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A_ = True A_ = int(vit_name[-12:-10] ) A_ = int(vit_name[-9:-6] ) else: A_ = 1000 A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} A_ = int(vit_name[-6:-4] ) A_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("tiny" ): A_ = 192 A_ = 768 A_ = 12 A_ = 3 elif vit_name[9:].startswith("small" ): A_ = 384 A_ = 1536 A_ = 12 A_ = 6 else: pass else: if vit_name[4:].startswith("small" ): A_ = 768 A_ = 2304 A_ = 8 A_ = 8 elif vit_name[4:].startswith("base" ): pass elif vit_name[4:].startswith("large" ): A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 elif vit_name[4:].startswith("huge" ): A_ = 1280 A_ = 5120 A_ = 32 A_ = 16 # load original model from timm A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ = timm_model.state_dict() if base_model: remove_classification_head_(__UpperCamelCase ) A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": A_ = ViTModel(__UpperCamelCase ).eval() else: A_ = ViTForImageClassification(__UpperCamelCase ).eval() model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A_ = DeiTImageProcessor(size=config.image_size ) else: A_ = ViTImageProcessor(size=config.image_size ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ) A_ = encoding["pixel_values"] A_ = model(__UpperCamelCase ) if base_model: A_ = timm_model.forward_features(__UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 ) else: A_ 
= timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __a :Optional[int] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
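# --- Standalone sketch of the qkv split above (not part of the original file) ---
# timm stores attention as one fused (3 * hidden, hidden) projection; the HF
# checkpoint wants separate query/key/value matrices. Sizes below are toy values.
import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
assert q.shape == k.shape == v.shape == (hidden, hidden)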
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[Any] ): A_ = tempfile.mkdtemp() # fmt: off A_ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on A_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) A_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] A_ = {"unk_token": "<unk>"} A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase ) ) A_ = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } A_ = os.path.join(self.tmpdirname , UpperCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Any , **UpperCAmelCase : str ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCAmelCase ) def __A ( self : Dict , **UpperCAmelCase : Optional[Any] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **UpperCAmelCase ) def __A ( self : List[Any] , **UpperCAmelCase : str ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : List[Any] ): shutil.rmtree(self.tmpdirname ) def __A ( self : Tuple ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = self.get_tokenizer() A_ = self.get_rust_tokenizer() A_ = self.get_image_processor() A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase ) A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase ) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase ) def __A ( self : Any ): A_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase ) A_ = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : Optional[int] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase , return_tensors="np" ) A_ = tokenizer(UpperCAmelCase , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) 
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : int ): A_ = "google/owlvit-base-patch32" A_ = OwlViTProcessor.from_pretrained(UpperCAmelCase ) A_ = ["cat", "nasa badge"] A_ = processor(text=UpperCAmelCase ) A_ = 16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Optional[int] ): A_ = "google/owlvit-base-patch32" A_ = OwlViTProcessor.from_pretrained(UpperCAmelCase ) A_ = [["cat", "nasa badge"], ["person"]] A_ = processor(text=UpperCAmelCase ) A_ = 16 A_ = len(UpperCAmelCase ) A_ = max([len(UpperCAmelCase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Optional[Any] ): A_ = "google/owlvit-base-patch32" A_ = OwlViTProcessor.from_pretrained(UpperCAmelCase ) A_ = ["cat", "nasa badge"] A_ = processor(text=UpperCAmelCase ) A_ = 16 A_ = inputs["input_ids"] A_ = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = self.prepare_image_inputs() A_ = processor(images=UpperCAmelCase , query_images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : List[str] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
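# --- Illustrative sketch (not part of the original test file) ---
# Shape bookkeeping checked in the nested-input test above: ragged per-image
# query lists are padded and flattened to
# (batch_size * num_max_text_queries, seq_length) rows of input_ids.
input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)
num_max_text_queries = max(len(texts) for texts in input_texts)
print(batch_size * num_max_text_queries)  # 4 rows after padding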
def solution(length: int = 50) -> int:
    """Count the ways a row of ``length`` units can hold any number of
    non-overlapping tiles of length two, three or four, with the remaining
    cells left empty (Project Euler 117)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
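# --- Verification sketch (not part of the original file) ---
# Cross-check of the dynamic programme above for tiny lengths, assuming the
# function is importable as ``solution``: a row either starts with an empty
# cell or with a tile of length 2, 3 or 4.
def brute(n: int) -> int:
    if n < 0:
        return 0
    if n == 0:
        return 1
    return brute(n - 1) + sum(brute(n - tile) for tile in (2, 3, 4))


assert all(brute(n) == solution(n) for n in range(8))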
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : jnp.ndarray _lowerCamelCase : jnp.ndarray class _a ( nn.Module ): """simple docstring""" _lowerCamelCase : int _lowerCamelCase : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) _lowerCamelCase : jnp.dtype = jnp.floataa def __A ( self : Optional[Any] ): A_ = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) A_ = [] for i in range(len(self.block_out_channels ) - 1 ): A_ = self.block_out_channels[i] A_ = self.block_out_channels[i + 1] A_ = nn.Conv( UpperCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(UpperCAmelCase ) A_ = nn.Conv( UpperCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(UpperCAmelCase ) A_ = blocks A_ = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : int , UpperCAmelCase : int ): A_ = self.conv_in(UpperCAmelCase ) A_ = nn.silu(UpperCAmelCase ) for block in self.blocks: A_ = block(UpperCAmelCase ) A_ = nn.silu(UpperCAmelCase ) A_ = self.conv_out(UpperCAmelCase ) return embedding @flax_register_to_config class _a ( nn.Module , snake_case_ , snake_case_ ): """simple docstring""" _lowerCamelCase : int = 3_2 _lowerCamelCase : int = 4 _lowerCamelCase : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _lowerCamelCase : Union[bool, Tuple[bool]] = False _lowerCamelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _lowerCamelCase : int = 2 _lowerCamelCase : Union[int, Tuple[int]] = 8 _lowerCamelCase : Optional[Union[int, Tuple[int]]] = None _lowerCamelCase : int = 1_2_8_0 _lowerCamelCase : float = 0.0 _lowerCamelCase : bool = False _lowerCamelCase : jnp.dtype = jnp.floataa _lowerCamelCase : bool = True _lowerCamelCase : int = 0 _lowerCamelCase : str = "rgb" _lowerCamelCase : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def __A ( self : List[Any] , UpperCAmelCase : jax.random.KeyArray ): # init input tensors A_ = (1, self.in_channels, self.sample_size, self.sample_size) A_ = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa ) A_ = jnp.ones((1,) , dtype=jnp.intaa ) A_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) A_ = (1, 3, self.sample_size * 8, self.sample_size * 8) A_ = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa ) A_ , A_ = jax.random.split(UpperCAmelCase ) A_ = {"params": params_rng, "dropout": dropout_rng} return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["params"] def __A ( self : str ): A_ = self.block_out_channels A_ = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. 
# The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. A_ = self.num_attention_heads or self.attention_head_dim # input A_ = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time A_ = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) A_ = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype ) A_ = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) A_ = self.only_cross_attention if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = (num_attention_heads,) * len(self.down_block_types ) # down A_ = [] A_ = [] A_ = block_out_channels[0] A_ = nn.Conv( UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCAmelCase ) for i, down_block_type in enumerate(self.down_block_types ): A_ = output_channel A_ = block_out_channels[i] A_ = i == len(UpperCAmelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": A_ = FlaxCrossAttnDownBlockaD( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: A_ = FlaxDownBlockaD( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(UpperCAmelCase ) for _ in range(self.layers_per_block ): A_ = nn.Conv( UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCAmelCase ) if not is_final_block: A_ = nn.Conv( UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCAmelCase ) A_ = down_blocks A_ = controlnet_down_blocks # mid A_ = block_out_channels[-1] A_ = FlaxUNetMidBlockaDCrossAttn( in_channels=UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) A_ = nn.Conv( UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : float = 1.0 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , ): A_ = self.controlnet_conditioning_channel_order if channel_order == "bgr": A_ = 
jnp.flip(UpperCAmelCase , axis=1 ) # 1. time if not isinstance(UpperCAmelCase , jnp.ndarray ): A_ = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: A_ = timesteps.astype(dtype=jnp.floataa ) A_ = jnp.expand_dims(UpperCAmelCase , 0 ) A_ = self.time_proj(UpperCAmelCase ) A_ = self.time_embedding(UpperCAmelCase ) # 2. pre-process A_ = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) ) A_ = self.conv_in(UpperCAmelCase ) A_ = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) ) A_ = self.controlnet_cond_embedding(UpperCAmelCase ) sample += controlnet_cond # 3. down A_ = (sample,) for down_block in self.down_blocks: if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ , A_ = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train ) else: A_ , A_ = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train ) down_block_res_samples += res_samples # 4. mid A_ = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train ) # 5. contronet blocks A_ = () for down_block_res_sample, controlnet_block in zip(UpperCAmelCase , self.controlnet_down_blocks ): A_ = controlnet_block(UpperCAmelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) A_ = controlnet_down_block_res_samples A_ = self.controlnet_mid_block(UpperCAmelCase ) # 6. scaling A_ = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=UpperCAmelCase , mid_block_res_sample=UpperCAmelCase )
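# --- Illustrative sketch (not part of the original module) ---
# Timestep normalization as done at the top of __call__ above: Python scalars
# and 0-d arrays are promoted to a 1-d batch before the sinusoidal projection.
import jax.numpy as jnp

timesteps = jnp.array(7)  # 0-d input
if timesteps.ndim == 0:
    timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)
assert timesteps.shape == (1,)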
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __a :List[str] = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , **UpperCAmelCase : List[str] ): super().__init__(**UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type(UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ): if "text_queries" in kwargs: A_ = kwargs.pop("text_queries" ) if isinstance(UpperCAmelCase , (str, Image.Image) ): A_ = {"image": image, "candidate_labels": candidate_labels} else: A_ = image A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase ) return results def __A ( self : int , **UpperCAmelCase : Tuple ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] if "top_k" in kwargs: A_ = kwargs["top_k"] return {}, {}, postprocess_params def __A ( self : List[str] , UpperCAmelCase : Dict ): A_ = load_image(inputs["image"] ) A_ = inputs["candidate_labels"] if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = candidate_labels.split("," ) A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCAmelCase ): A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework ) A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __A ( self : str , UpperCAmelCase : int ): A_ = model_inputs.pop("target_size" ) A_ = model_inputs.pop("candidate_label" ) A_ = model_inputs.pop("is_last" ) A_ = self.model(**UpperCAmelCase ) A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ): A_ = [] for model_output in model_outputs: A_ = model_output["candidate_label"] A_ = BaseModelOutput(UpperCAmelCase ) A_ = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): A_ = outputs["scores"][index].item() A_ = self._get_bounding_box(outputs["boxes"][index][0] ) A_ = {"score": score, "label": label, "box": box} results.append(UpperCAmelCase ) A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase ) if top_k: A_ = results[:top_k] return results def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
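# --- Hedged usage sketch (not part of the original file) ---
# Driving the pipeline above through the public transformers API; the
# checkpoint name is borrowed from the OwlViT tests earlier in this document
# and the image URL is a placeholder (network access is required).
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
print(predictions[0]["label"], predictions[0]["score"], predictions[0]["box"])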
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
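# --- Verification sketch (not part of the original file) ---
# Brute-force check of the closed forms for n = 10: the sum of squares is
# 385, the square of the sum is 3025, and the difference is 2640.
n = 10
assert sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1)) == 2640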
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) __a :int = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } __a :Tuple = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: A_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ): """simple docstring""" A_ = torch.load(__UpperCamelCase ) A_ = WavLMConfigOrig(checkpoint["cfg"] ) A_ = WavLMOrig(__UpperCamelCase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: A_ = WavLMConfig.from_pretrained(__UpperCamelCase ) else: A_ = WavLMConfig() A_ = WavLMModel(__UpperCamelCase ) recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ) hf_wavlm.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __a :Optional[int] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
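# --- Illustrative sketch (not part of the original script) ---
# How the "*" placeholder in MAPPING becomes a concrete layer index during
# weight loading; the fairseq-style name below is made up, and the index
# parsing is simplified relative to the function above.
name = "encoder.layers.3.self_attn.k_proj.weight"
mapped_key = "encoder.layers.*.attention.k_proj"
layer_index = name.split("layers.")[-1].split(".")[0]
print(mapped_key.replace("*", layer_index))  # encoder.layers.3.attention.k_proj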
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def __snake_case ( __UpperCamelCase : Dict[str, torch.Tensor] ): """simple docstring""" A_ = [] A_ = [] A_ = [] for rt in rc.restypes: A_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) A_ = {name: i for i, name in enumerate(__UpperCamelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) A_ = torch.tensor( __UpperCamelCase ,dtype=torch.intaa ,device=protein["aatype"].device ,) A_ = torch.tensor( __UpperCamelCase ,dtype=torch.intaa ,device=protein["aatype"].device ,) A_ = torch.tensor( __UpperCamelCase ,dtype=torch.floataa ,device=protein["aatype"].device ,) A_ = protein["aatype"].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein A_ = restype_atomaa_to_atomaa[protein_aatype] A_ = restype_atomaa_mask[protein_aatype] A_ = residx_atomaa_mask A_ = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back A_ = restype_atomaa_to_atomaa[protein_aatype] A_ = residx_atomaa_to_atomaa.long() # create the corresponding mask A_ = torch.zeros([21, 37] ,dtype=torch.floataa ,device=protein["aatype"].device ) for restype, restype_letter in enumerate(rc.restypes ): A_ = rc.restype_atoa[restype_letter] A_ = rc.residue_atoms[restype_name] for atom_name in atom_names: A_ = rc.atom_order[atom_name] A_ = 1 A_ = restype_atomaa_mask[protein_aatype] A_ = residx_atomaa_mask return protein def __snake_case ( __UpperCamelCase : Dict[str, torch.Tensor] ): """simple docstring""" A_ = tree_map(lambda __UpperCamelCase : torch.tensor(__UpperCamelCase ,device=batch["aatype"].device ) ,__UpperCamelCase ,np.ndarray ) A_ = tensor_tree_map(lambda __UpperCamelCase : np.array(__UpperCamelCase ) ,make_atomaa_masks(__UpperCamelCase ) ) return out
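# --- Illustrative sketch (not part of the original module) ---
# The gather pattern behind the residx mappings above: a per-residue-type
# index table is looked up by aatype to give one index map per residue.
# Sizes are toy values, not the real 14/37 atom layouts.
import torch

table = torch.tensor([[0, 2, 1], [1, 0, 2]])  # (num_restypes, num_atoms)
aatype = torch.tensor([0, 1, 0])              # three residues
per_residue = table[aatype]                   # one index row per residue
assert per_residue.shape == (3, 3)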
86
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : int = 0 ): """simple docstring""" A_ = length or len(__UpperCamelCase ) A_ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: A_ , A_ = list_data[i + 1], list_data[i] A_ = True return list_data if not swapped else bubble_sort(__UpperCamelCase ,length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
86
1
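For readability, a self-contained rendering of the recursive bubble sort above, with descriptive names in place of the obfuscated ones (equivalent logic, names inferred from context):

def bubble_sort(data: list, length: int = 0) -> list:
    # Each pass floats the largest remaining element to the end;
    # recurse on the shorter prefix until a pass makes no swap.
    length = length or len(data)
    swapped = False
    for i in range(length - 1):
        if data[i] > data[i + 1]:
            data[i], data[i + 1] = data[i + 1], data[i]
            swapped = True
    return data if not swapped else bubble_sort(data, length - 1)

assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]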
def __snake_case ( __UpperCamelCase : int = 10**9 ): """simple docstring""" A_ = 1 A_ = 2 A_ = 0 A_ = 0 A_ = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value A_ = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F"{solution() = }")
86
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[str] ): A_ = torch.nn.Linear(10 , 10 ) A_ = torch.optim.SGD(model.parameters() , 0.1 ) A_ = Accelerator() A_ = accelerator.prepare(UpperCAmelCase ) try: pickle.loads(pickle.dumps(UpperCAmelCase ) ) except Exception as e: self.fail(f'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
86
1
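The perimeter loop above is easier to follow with the names spelled out; this is the same recurrence, with variable names guessed from context rather than taken from the original source:

def perimeter_sum(max_perimeter: int = 10**9) -> int:
    # Sum the perimeters produced by the recurrence while they stay
    # within the limit; the first iteration adds the initial 0.
    prev_value, value = 1, 2
    i = perimeter = perimeters_sum = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum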
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a :List[str] = logging.get_logger(__name__) __a :Tuple = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = 'table-transformer' _lowerCamelCase : Any = ['past_key_values'] _lowerCamelCase : Optional[int] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Dict , UpperCAmelCase : str=True , UpperCAmelCase : str=None , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : List[str]=100 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : Optional[int]=2048 , UpperCAmelCase : Any=8 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : str=2048 , UpperCAmelCase : Dict=8 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : int=True , UpperCAmelCase : Any="relu" , UpperCAmelCase : int=256 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : str=0.02 , UpperCAmelCase : List[str]=1.0 , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Dict="sine" , UpperCAmelCase : List[str]="resnet50" , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : Union[str, Any]=5 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Dict=1 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[int]=0.1 , **UpperCAmelCase : int , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) A_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = backbone_config.get("model_type" ) A_ = CONFIG_MAPPING[backbone_model_type] A_ = config_class.from_dict(UpperCAmelCase ) # set timm attributes to None A_ , A_ , A_ = None, None, None A_ = use_timm_backbone A_ = backbone_config A_ = num_channels A_ = num_queries A_ = d_model A_ = encoder_ffn_dim A_ = encoder_layers A_ = encoder_attention_heads A_ = decoder_ffn_dim A_ = decoder_layers A_ = decoder_attention_heads A_ = dropout A_ = attention_dropout A_ = activation_dropout A_ = activation_function A_ = init_std A_ = init_xavier_std A_ = encoder_layerdrop A_ = decoder_layerdrop A_ = encoder_layers A_ = auxiliary_loss A_ = position_embedding_type A_ = backbone A_ = use_pretrained_backbone A_ = dilation # Hungarian matcher A_ = class_cost A_ = bbox_cost A_ = giou_cost # Loss coefficients A_ = mask_loss_coefficient A_ = dice_loss_coefficient A_ = bbox_loss_coefficient A_ = giou_loss_coefficient A_ = eos_coefficient super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : int ): return self.encoder_attention_heads @property def __A ( self : int ): return self.d_model class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = version.parse('1.11' ) @property def __A ( self : Any ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def __A ( self : Any ): return 1E-5 @property def __A ( self : List[Any] ): return 12
86
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a :List[str] = logging.get_logger(__name__) __a :Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __a :Any = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ = None for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True elif name.split("." )[0] == "proj": A_ = fairseq_model.proj A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) return proj_weight def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ , A_ = emb.weight.shape A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase ) A_ = emb.weight.data return lin_layer def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.split(" " )[0] for line in lines] A_ = len(__UpperCamelCase ) A_ = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,): """simple docstring""" A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase ) A_ = SpeechaTextaConfig.from_pretrained( __UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase ) A_ = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) A_ , A_ , A_ = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ = model[0].eval() # set weights for wav2vec2 encoder A_ = WavaVecaModel(__UpperCamelCase ) A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase ) A_ = SpeechaTextaForCausalLM(__UpperCamelCase ) A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase ) A_ = False # add projection layer A_ = nn.Parameter(projection_layer.weight ) A_ = nn.Parameter(projection_layer.bias ) A_ = create_vocab_dict(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) ) tokenizer.save_pretrained(__UpperCamelCase ) A_ = hf_wavavec.config.to_dict() A_ = tokenizer.pad_token_id A_ = tokenizer.bos_token_id A_ = tokenizer.eos_token_id A_ = "speech_to_text_2" A_ = "wav2vec2" A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) feature_extractor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __a :Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
86
1
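The conversion script just above builds its output projection from the decoder's embedding matrix; a minimal standalone sketch of that tying trick (a simplified version, not the exact helper):

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    # Copy an embedding matrix (vocab_size x hidden) into a bias-free
    # linear layer -- the usual way to tie an LM head to the embeddings.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer

emb = nn.Embedding(10, 4)
lm_head = make_linear_from_emb(emb)
assert lm_head(torch.zeros(1, 4)).shape == (1, 10)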
import math from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Dict = logging.get_logger(__name__) __a :Optional[Any] = { 'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : int = 'data2vec-audio' def __init__( self : Dict , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : int=12 , UpperCAmelCase : int=12 , UpperCAmelCase : Optional[Any]=3072 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : int=1E-5 , UpperCAmelCase : Optional[Any]="gelu" , UpperCAmelCase : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : List[Any]=19 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : List[str]=0.05 , UpperCAmelCase : int=10 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : int=10 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : List[Any]="sum" , UpperCAmelCase : int=False , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=256 , UpperCAmelCase : Union[str, Any]=(512, 512, 512, 512, 1500) , UpperCAmelCase : Dict=(5, 3, 3, 1, 1) , UpperCAmelCase : Union[str, Any]=(1, 2, 3, 1, 1) , UpperCAmelCase : Optional[int]=512 , UpperCAmelCase : int=0 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Dict=2 , UpperCAmelCase : int=False , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : str=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase ) A_ = hidden_size A_ = feat_extract_activation A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = conv_bias A_ = num_conv_pos_embeddings A_ = num_conv_pos_embedding_groups A_ = conv_pos_kernel_size A_ = len(self.conv_dim ) A_ = num_hidden_layers A_ = intermediate_size A_ = hidden_act A_ = num_attention_heads A_ = hidden_dropout A_ = attention_dropout A_ = activation_dropout A_ = feat_proj_dropout A_ = final_dropout A_ = layerdrop A_ = layer_norm_eps A_ = initializer_range A_ = vocab_size A_ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ = mask_time_prob A_ = mask_time_length A_ = mask_time_min_masks A_ = mask_feature_prob A_ = mask_feature_length A_ = mask_feature_min_masks # ctc loss A_ = ctc_loss_reduction A_ = ctc_zero_infinity # adapter A_ = add_adapter A_ = adapter_kernel_size A_ = adapter_stride A_ = num_adapter_layers A_ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = xvector_output_dim @property def __A ( self : Optional[Any] ): return math.prod(self.conv_stride )
86
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __a :str = logging.get_logger(__name__) __a :Any = Dict[str, Any] __a :int = List[Prediction] @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __A ( self : str , **UpperCAmelCase : str ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return super().__call__(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Any ): A_ = load_image(UpperCAmelCase ) A_ = torch.IntTensor([[image.height, image.width]] ) A_ = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) A_ = target_size return inputs def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = model_inputs.pop("target_size" ) A_ = self.model(**UpperCAmelCase ) A_ = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: A_ = model_inputs["bbox"] return model_outputs def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ): A_ = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A_ , A_ = target_size[0].tolist() def unnormalize(UpperCAmelCase : Any ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )] A_ = ["score", "label", "box"] A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = raw_annotations[0] A_ = raw_annotation["scores"] A_ = raw_annotation["labels"] A_ = raw_annotation["boxes"] A_ = scores.tolist() A_ = [self.model.config.idalabel[label.item()] for label in labels] A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A_ = ["score", "label", "box"] A_ = [ dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
86
1
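The final property of the data2vec-audio config above multiplies the conv strides together; that product is the feature extractor's input-to-frame downsampling factor. A quick check with the default strides:

import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the config above

# one output frame per this many raw input samples
inputs_to_logits_ratio = math.prod(conv_stride)
assert inputs_to_logits_ratio == 320  # at 16 kHz that is one frame per 20 ms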
from __future__ import annotations class _a : """simple docstring""" def __init__( self : Dict , UpperCAmelCase : int ): A_ = order # a_{0} ... a_{k} A_ = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ = [0.0] * self.order # y[n-1] ... y[n-k] A_ = [0.0] * self.order def __A ( self : Union[str, Any] , UpperCAmelCase : list[float] , UpperCAmelCase : list[float] ): if len(UpperCAmelCase ) < self.order: A_ = [1.0, *a_coeffs] if len(UpperCAmelCase ) != self.order + 1: A_ = ( f'''Expected a_coeffs to have {self.order + 1} elements ''' f'''for {self.order}-order filter, got {len(UpperCAmelCase )}''' ) raise ValueError(UpperCAmelCase ) if len(UpperCAmelCase ) != self.order + 1: A_ = ( f'''Expected b_coeffs to have {self.order + 1} elements ''' f'''for {self.order}-order filter, got {len(UpperCAmelCase )}''' ) raise ValueError(UpperCAmelCase ) A_ = a_coeffs A_ = b_coeffs def __A ( self : Tuple , UpperCAmelCase : float ): A_ = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ = self.input_history[:-1] A_ = self.output_history[:-1] A_ = sample A_ = result return result
86
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ , A_ = image.size A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image[None].transpose(0 ,3 ,1 ,2 ) A_ = torch.from_numpy(__UpperCamelCase ) return 2.0 * image - 1.0 class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = 1 elif isinstance(UpperCAmelCase , torch.Tensor ): A_ = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' ) if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = preprocess(UpperCAmelCase ) A_ , A_ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image A_ = (batch_size, self.unet.config.in_channels // 2, height, width) A_ = next(self.unet.parameters() ).dtype A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) A_ = image.to(device=self.device , dtype=UpperCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCAmelCase , device=self.device ) A_ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta for t in self.progress_bar(UpperCAmelCase ): # concat latents and low resolution image in the channel dimension. 
A_ = torch.cat([latents, image] , dim=1 ) A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # decode the image latents with the VQVAE A_ = self.vqvae.decode(UpperCAmelCase ).sample A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 ) A_ = image / 2 + 0.5 A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
86
1
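The filter class above evaluates the standard direct-form difference equation y[n] = (sum_i b_i x[n-i] - sum_{i>=1} a_i y[n-i]) / a_0. A first-order sanity check, with coefficients chosen here purely for illustration:

def iir_step(sample: float, prev_output: float, k: float = 0.9) -> float:
    # First-order low-pass y[n] = (1 - k) * x[n] + k * y[n-1], written in
    # the a/b convention used above: a = [1, -k], b = [1 - k, 0].
    a_coeffs, b_coeffs = [1.0, -k], [1.0 - k, 0.0]
    result = -a_coeffs[1] * prev_output  # b_coeffs[1] is 0, so x[n-1] drops out
    return (result + b_coeffs[0] * sample) / a_coeffs[0]

y = 0.0
for x in [1.0] * 5:  # a step input settles toward 1.0
    y = iir_step(x, y)
assert round(y, 5) == 0.40951  # 1 - 0.9**5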
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __a :Optional[int] = { '<': operator.lt, '<=': operator.le, '==': operator.eq, '!=': operator.ne, '>=': operator.ge, '>': operator.gt, } def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Any ): """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider''' f''' reinstalling {pkg}.''' ) if not ops[op](version.parse(__UpperCamelCase ) ,version.parse(__UpperCamelCase ) ): raise ImportError( f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" A_ = f'''\n{hint}''' if hint is not None else "" # non-versioned check if re.match(R"^[\w_\-\d]+$" ,__UpperCamelCase ): A_ , A_ , A_ = requirement, None, None else: A_ = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" ,__UpperCamelCase ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f''' got {requirement}''' ) A_ , A_ = match[0] A_ = want_full.split("," ) # there could be multiple requirements A_ = {} for w in want_range: A_ = re.findall(R"^([\s!=<>]{1,2})(.+)" ,__UpperCamelCase ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f''' but got {requirement}''' ) A_ , A_ = match[0] A_ = want_ver if op not in ops: raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' ) # special case if pkg == "python": A_ = ".".join([str(__UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return # check if any version is installed try: A_ = importlib.metadata.version(__UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(__UpperCamelCase ,__UpperCamelCase )
86
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)] def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = 0 while number: # Speed is increased slightly by checking five digits at a time. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # Two chains are formed: # one ends with 89, and declaring its member 58 first gives the fewest # iterations needed to check all of the members. # The other ends with 1 and contains only the single element 1. # So 58 and 1 are chosen to be declared at the start. # Changed the dictionary to an array to speed up the solution __a :list[bool | None] = [None] * 1000_0000 __a :Optional[Any] = True __a :List[Any] = False def __snake_case ( __UpperCamelCase : int ): """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore A_ = chain(next_number(__UpperCamelCase ) ) A_ = number_chain while number < 1000_0000: A_ = number_chain number *= 10 return number_chain def __snake_case ( __UpperCamelCase : int = 1000_0000 ): """simple docstring""" for i in range(1 ,__UpperCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F"{solution() = }")
86
1
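An unmemoized rendering of the digit-square chain above, useful for seeing why only the two terminals 1 and 89 matter:

def next_number(number: int) -> int:
    # sum of squared digits, e.g. 44 -> 32 -> 13 -> 10 -> 1
    total = 0
    while number:
        total += (number % 10) ** 2
        number //= 10
    return total

def chain_ends_at_89(number: int) -> bool:
    while number not in (1, 89):
        number = next_number(number)
    return number == 89

assert chain_ends_at_89(85)       # 85 -> 89
assert not chain_ends_at_89(44)   # 44 -> 32 -> 13 -> 10 -> 1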
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar __a :str = TypeVar('T') class _a ( Generic[T] ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : bool = True ): A_ = {} # dictionary of lists A_ = directed def __A ( self : List[Any] , UpperCAmelCase : T , UpperCAmelCase : T ): if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase ) self.adj_list[destination_vertex].append(UpperCAmelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase ) A_ = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(UpperCAmelCase ) A_ = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: A_ = [destination_vertex] A_ = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase ) A_ = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: A_ = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: A_ = [destination_vertex] A_ = [] return self def __repr__( self : Optional[Any] ): return pformat(self.adj_list )
86
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :List[Any] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
1
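The long if/elif ladder in the graph class above exists only to handle vertices that have not been seen yet; a defaultdict collapses all of those branches. A minimal equivalent sketch:

from collections import defaultdict

class Graph:
    def __init__(self, directed: bool = True):
        self.adj_list = defaultdict(list)  # unseen vertices start as []
        self.directed = directed

    def add_edge(self, source, destination):
        self.adj_list[source].append(destination)
        if self.directed:
            self.adj_list[destination]  # materialise the vertex with no edges
        else:
            self.adj_list[destination].append(source)
        return self

g = Graph(directed=False).add_edge(1, 2).add_edge(2, 3)
assert g.adj_list[2] == [1, 3]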
from ...configuration_utils import PretrainedConfig __a :str = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : int = 'tapas' def __init__( self : Optional[Any] , UpperCAmelCase : Dict=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : Dict=3072 , UpperCAmelCase : Tuple="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=1024 , UpperCAmelCase : Dict=[3, 256, 256, 2, 256, 256, 10] , UpperCAmelCase : str=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Optional[int]=10.0 , UpperCAmelCase : int=0 , UpperCAmelCase : Any=1.0 , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Union[str, Any]=1.0 , UpperCAmelCase : List[Any]=False , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=1.0 , UpperCAmelCase : Optional[int]=1.0 , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : List[str]="ratio" , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Union[str, Any]=64 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Tuple=False , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Tuple=True , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : str , ): super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = hidden_act A_ = intermediate_size A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_sizes A_ = initializer_range A_ = layer_norm_eps # Fine-tuning task hyperparameters A_ = positive_label_weight A_ = num_aggregation_labels A_ = aggregation_loss_weight A_ = use_answer_as_supervision A_ = answer_loss_importance A_ = use_normalized_answer_loss A_ = huber_loss_delta A_ = temperature A_ = aggregation_temperature A_ = use_gumbel_for_cells A_ = use_gumbel_for_aggregation A_ = average_approximation_function A_ = cell_selection_preference A_ = answer_loss_cutoff A_ = max_num_rows A_ = max_num_columns A_ = average_logits_per_cell A_ = select_one_column A_ = allow_empty_column_selection A_ = init_cell_selection_weights_to_zero A_ = reset_position_index_per_cell A_ = disable_per_token_loss # Aggregation hyperparameters A_ = aggregation_labels A_ = no_aggregation_label_index if isinstance(self.aggregation_labels , UpperCAmelCase ): A_ = {int(UpperCAmelCase ): v for k, v in aggregation_labels.items()}
86
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __a :List[Any] = get_logger() __a :Optional[dict] = None class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): """simple docstring""" def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ): super().__init__(features=UpperCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) A_ = str(jax.devices()[0] ) A_ = jnp_array_kwargs @staticmethod def __A ( ): import jax return {str(UpperCAmelCase ): device for device in jax.devices()} def __A ( self : Optional[int] , UpperCAmelCase : int ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , UpperCAmelCase ) and column: if all( isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase , axis=0 ) return column def __A ( self : List[str] , UpperCAmelCase : str ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ = {} if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A_ = {"dtype": jnp.intaa} else: A_ = {"dtype": jnp.intaa} elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = np.asarray(UpperCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return 
jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def __A ( self : Any , UpperCAmelCase : Dict ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ): A_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase ) A_ = self.python_features_decoder.decode_row(UpperCAmelCase ) return self.recursive_tensorize(UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase ) A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] ) A_ = self.recursive_tensorize(UpperCAmelCase ) A_ = self._consolidate(UpperCAmelCase ) return column def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase ) A_ = self.python_features_decoder.decode_batch(UpperCAmelCase ) A_ = self.recursive_tensorize(UpperCAmelCase ) for column_name in batch: A_ = self._consolidate(batch[column_name] ) return batch
86
1
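The jax formatter above chooses integer precision from jax's x64 flag before calling jnp.array; a standalone sketch of that dtype-defaulting rule (assumes jax is installed; the function name is mine):

import numpy as np
import jax
import jax.numpy as jnp

def default_jnp_dtype(value: np.ndarray) -> dict:
    # jax defaults to 32-bit unless x64 mode is enabled, so pick the
    # integer width accordingly and pin floats to float32.
    if np.issubdtype(value.dtype, np.integer):
        return {"dtype": jnp.int64 if jax.config.jax_enable_x64 else jnp.int32}
    if np.issubdtype(value.dtype, np.floating):
        return {"dtype": jnp.float32}
    return {}

arr = np.arange(3)
out = jnp.array(arr, **default_jnp_dtype(arr))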
from math import ceil def __snake_case ( __UpperCamelCase : int = 1001 ): """simple docstring""" A_ = 1 for i in range(1 ,int(ceil(n / 2.0 ) ) ): A_ = 2 * i + 1 A_ = 2 * i A_ = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: __a :Union[str, Any] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number')
86
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __a :Any = logging.getLogger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ): super().__init__( UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , ) A_ = None def __A ( self : Dict , UpperCAmelCase : int ): logger.info("initializing retrieval" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized" ) # needs to be set manually A_ = self._infer_socket_ifname() # avoid clash with the NCCL port A_ = str(distributed_port + 1 ) A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __A ( self : List[str] ): return dist.get_rank(group=self.process_group ) == 0 def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ): A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase ) dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group ) return target_tensor def __A ( self : Any ): A_ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase ) return ifname def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ): # single GPU training if not dist.is_initialized(): A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase ) # distributed training A_ = dist.get_world_size(group=self.process_group ) # gather logic A_ = None if self._is_main(): A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )] dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group ) # scatter logic A_ = question_hidden_states.shape[0] A_ = [] A_ = [] if self._is_main(): assert len(UpperCAmelCase ) == world_size A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase ) A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase )
86
1
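The diagonal-sum loop above relies on a per-ring identity: in an n x n number spiral, the ring with side length k (k odd, k >= 3) has corners k^2, k^2 - (k-1), k^2 - 2(k-1), and k^2 - 3(k-1), which sum to 4k^2 - 6(k-1). A direct rendering with that identity spelled out:

def spiral_diagonal_sum(n: int = 1001) -> int:
    # n must be odd; start from the centre cell 1 and add 4k^2 - 6(k - 1)
    # for each ring of side length k = 3, 5, ..., n.
    total = 1
    for k in range(3, n + 1, 2):
        total += 4 * k * k - 6 * (k - 1)
    return total

assert spiral_diagonal_sum(5) == 101  # 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25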
def __snake_case ( __UpperCamelCase : int = 6008_5147_5143 ): """simple docstring""" try: A_ = int(__UpperCamelCase ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) A_ = 2 A_ = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 A_ = i while n % i == 0: A_ = n // i i += 1 return int(__UpperCamelCase ) if __name__ == "__main__": print(F"{solution() = }")
86
from jiwer import compute_measures import datasets __a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' __a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def __A ( self : Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ): if concatenate_texts: return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"] else: A_ = 0 A_ = 0 for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ): A_ = compute_measures(UpperCAmelCase , UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + 
measures["hits"] return incorrect / total
86
1
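The trial-division loop above strips each factor as soon as it divides n, which is why every divisor it finds is prime; a compact variant of the same idea with descriptive names:

def largest_prime_factor(n: int) -> int:
    # Strip each factor i completely before moving on, so any i that
    # still divides n must be prime; whatever survives past sqrt(n) is too.
    answer = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            answer = i
            n //= i
        i += 1
    if n > 1:
        answer = n  # the remaining cofactor is prime
    return answer

assert largest_prime_factor(600851475143) == 6857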
from math import factorial def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float ): """simple docstring""" if successes > trials: raise ValueError("successes must be lower than or equal to trials" ) if trials < 0 or successes < 0: raise ValueError("the function is defined for non-negative integers" ) if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise ValueError("the function is defined for non-negative integers" ) if not 0 < prob < 1: raise ValueError("prob has to be in the range of 0 - 1" ) A_ = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! A_ = float(factorial(__UpperCamelCase ) ) coefficient /= factorial(__UpperCamelCase ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('Probability of 2 successes out of 4 trials') print('with probability of 0.75 is:', end=' ') print(binomial_distribution(2, 4, 0.75))
86
class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ): A_ = None A_ = None A_ = graph self._normalize_graph(UpperCAmelCase , UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = None def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ): if sources is int: A_ = [sources] if sinks is int: A_ = [sinks] if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0: return A_ = sources[0] A_ = sinks[0] # make fake vertex if there are more # than one source or sink if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1: A_ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A_ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A_ = max_input_flow A_ = 0 A_ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A_ = max_input_flow A_ = size - 1 def __A ( self : str ): if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __A ( self : Tuple , UpperCAmelCase : List[Any] ): A_ = algorithm(self ) class _a : """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : List[str] ): A_ = flow_network A_ = flow_network.verticesCount A_ = flow_network.sourceIndex A_ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A_ = flow_network.graph A_ = False def __A ( self : Optional[int] ): if not self.executed: self._algorithm() A_ = True def __A ( self : Dict ): pass class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ): super().__init__(UpperCAmelCase ) # use this to save your result A_ = -1 def __A ( self : Tuple ): if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ): super().__init__(UpperCAmelCase ) A_ = [[0] * self.verticies_count for i in range(self.verticies_count )] A_ = [0] * self.verticies_count A_ = [0] * self.verticies_count def __A ( self : List[str] ): A_ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A_ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A_ = 0 while i < len(UpperCAmelCase ): A_ = vertices_list[i] A_ = self.heights[vertex_index] self.process_vertex(UpperCAmelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) ) A_ = 0 else: i += 1 A_ = sum(self.preflow[self.source_index] ) def __A ( self : List[str] , UpperCAmelCase : Dict ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(UpperCAmelCase , UpperCAmelCase ) self.relabel(UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ): A_ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A_ = self.heights[to_index] if min_height is not None: A_ = min_height + 1 if __name__ == "__main__": __a :Tuple = [0] __a :Tuple = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __a :List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __a :List[Any] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
86
1
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.

    Args:
        main_process_only (`bool`, *optional*):
            Whether to display the progress bar only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError(
            "Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`."
        )
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
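# A hedged usage sketch for the wrapper above: `main_process_only` is the first
# positional argument, so the iterable and the usual tqdm kwargs follow it.
# The loop body is a placeholder.
for _ in tqdm(True, range(1_000), desc="steps"):
    pass  # the bar renders only on local rank 0; other ranks stay silent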
86
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :str = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
86
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __a :str = logging.get_logger(__name__) __a :Any = Dict[str, Any] __a :int = List[Prediction] @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __A ( self : str , **UpperCAmelCase : str ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return super().__call__(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Any ): A_ = load_image(UpperCAmelCase ) A_ = torch.IntTensor([[image.height, image.width]] ) A_ = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) A_ = target_size return inputs def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = model_inputs.pop("target_size" ) A_ = self.model(**UpperCAmelCase ) A_ = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: A_ = model_inputs["bbox"] return model_outputs def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ): A_ = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A_ , A_ = target_size[0].tolist() def unnormalize(UpperCAmelCase : Any ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )] A_ = ["score", "label", "box"] A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = raw_annotations[0] A_ = raw_annotation["scores"] A_ = raw_annotation["labels"] A_ = raw_annotation["boxes"] A_ = scores.tolist() A_ = [self.model.config.idalabel[label.item()] for label in labels] A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A_ = ["score", "label", "box"] A_ = [ dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
86
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A_ = f'''{src_lang}-{tgt_lang}''' A_ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = os.path.join(__UpperCamelCase ,"README.md" ) print(f'''Generating {path}''' ) with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f: f.write(__UpperCamelCase ) # make sure we are under the root of the project __a :Optional[Any] = Path(__file__).resolve().parent.parent.parent __a :Optional[Any] = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __a , __a , __a :int = model_name.split('-') __a :str = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
86
1
import numpy as np


class Cell:
    """
    A cell in the world grid: it stores its position, the parent cell it was
    reached from, and the A* bookkeeping values g (cost so far), h (heuristic)
    and f (g + h).
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours (including diagonals) of `cell`."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`, using squared distance as heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # skip neighbours that have already been expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # skip if an equally good (or better) copy is already queued
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
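# A small sanity-check sketch for astar() above (all names here are local to
# the sketch): the returned path must begin at the start cell and end at the
# goal cell.
w = Gridworld()
a, b = Cell(), Cell()
a.position, b.position = (0, 0), (4, 4)
path = astar(w, a, b)
assert path[0] == a.position and path[-1] == b.position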
86
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : str = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['torch', 'transformers', 'onnx'] def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['torch', 'transformers', 'onnx'] def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : 
int ): requires_backends(cls , ["torch", "transformers", "onnx"] )
86
1
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """
    Interpolate and evaluate a polynomial at `x0` using Neville's method.
    Returns the approximated value together with the full Neville table.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
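# A quick check sketch for the function above: the sample points lie on the
# line y = x + 5, and polynomial interpolation reproduces a line exactly, so
# evaluating at 5.0 should give 10.0.
value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5.0)
assert abs(value - 10.0) < 1e-9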
86
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,) def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ): A_ = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase ) return config def __A ( self : Optional[Any] ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def __A ( self : Dict ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase ) def __A ( self : int ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase ) def __A ( self : Tuple ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase ) def __A ( self : int ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase ) def __A ( self : Union[str, Any] ): self.check_over_configs(thresholding=UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , ) def __A ( self : Optional[int] ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def __A ( self : Tuple ): for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = self.dummy_sample_deter + 0.1 A_ = self.dummy_sample_deter - 0.1 A_ = samplea.shape[0] A_ = torch.stack([samplea, samplea, samplea] , dim=0 ) A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase ) A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2 assert abs(result_mean.item() - 0.5_005 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config(prediction_type="v_prediction" ) A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def __A ( self : Union[str, Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase ) A_ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase ): if i == len(UpperCAmelCase ) - 1: A_ = -1 else: A_ = timesteps[i + 1] A_ = scheduler.previous_timestep(UpperCAmelCase ) A_ = prev_t.item() self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] A_ = len(UpperCAmelCase ) with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase )
86
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __a :List[Any] = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[str] = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ): """simple docstring""" with open(__UpperCamelCase ) as metadata_file: A_ = json.load(__UpperCamelCase ) A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) # Load the entity vocab file A_ = load_entity_vocab(__UpperCamelCase ) A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ) # Initialize the embeddings of the special tokens A_ = state_dict["embeddings.word_embeddings.weight"] A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) A_ = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: A_ = f'''encoder.layer.{layer_index}.attention.self.''' A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks A_ = state_dict["entity_embeddings.entity_embeddings.weight"] A_ = entity_emb[entity_vocab["[MASK]"]] A_ = LukeModel(config=__UpperCamelCase ).eval() A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' ) # Check outputs A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" ) A_ = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." 
) A_ = (39, 42) A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" ) A_ = model(**__UpperCamelCase ) # Verify word hidden states if model_size == "large": A_ = torch.Size((1, 42, 1024) ) A_ = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base A_ = torch.Size((1, 42, 768) ) A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": A_ = torch.Size((1, 1, 1024) ) A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base A_ = torch.Size((1, 1, 768) ) A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__UpperCamelCase ) ) model.save_pretrained(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = {} with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: for index, line in enumerate(__UpperCamelCase ): A_ , A_ = line.rstrip().split("\t" ) A_ = index return entity_vocab if __name__ == "__main__": __a :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) __a :Tuple = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
86
1
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
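# Spot-check sketch for solution() above on a small exponent:
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26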
86
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __a :Optional[Any] = 'true' def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ): """simple docstring""" set_seed(42 ) A_ = RegressionModel() A_ = deepcopy(__UpperCamelCase ) A_ = RegressionDataset(length=__UpperCamelCase ) A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase ) model.to(accelerator.device ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return model, ddp_model, dataloader def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ): """simple docstring""" A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) A_ = load_dataset("glue" ,"mrpc" ,split="validation" ) def tokenize_function(__UpperCamelCase : Optional[Any] ): A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ) return outputs with accelerator.main_process_first(): A_ = dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,) A_ = tokenized_datasets.rename_column("label" ,"labels" ) def collate_fn(__UpperCamelCase : Union[str, Any] ): if use_longest: return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" ) return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" ) return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ): """simple docstring""" A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase ) A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches ) A_ = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] for batch in dataloader: A_ , A_ = batch.values() with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ , A_ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A_ , A_ = [], [] for logit, targ in logits_and_targets: logits.append(__UpperCamelCase ) targs.append(__UpperCamelCase ) A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase ) return logits, targs def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ): """simple docstring""" A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) assert ( len(__UpperCamelCase ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}''' def __snake_case ( 
__UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ): """simple docstring""" A_ = evaluate.load("glue" ,"mrpc" ) A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase ) # First do baseline A_ , A_ , A_ = setup["no"] model.to(__UpperCamelCase ) model.eval() for batch in dataloader: batch.to(__UpperCamelCase ) with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] ) A_ = metric.compute() # Then do distributed A_ , A_ , A_ = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) A_ = batch["labels"] A_ , A_ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase ) A_ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def __snake_case ( ): """simple docstring""" A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(__UpperCamelCase ,__UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(__UpperCamelCase ,99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) A_ = Accelerator() test_torch_metrics(__UpperCamelCase ,512 ) accelerator.state._reset_state() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
86
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :List[str] = logging.get_logger(__name__) __a :List[str] = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = 'open-llama' def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=100000 , UpperCAmelCase : Tuple=4096 , UpperCAmelCase : int=11008 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Optional[Any]="silu" , UpperCAmelCase : int=2048 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=1E-6 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Tuple=0 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=None , **UpperCAmelCase : str , ): A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = intermediate_size A_ = num_hidden_layers A_ = num_attention_heads A_ = hidden_act A_ = initializer_range A_ = rms_norm_eps A_ = use_cache A_ = kwargs.pop( "use_memorry_efficient_attention" , UpperCAmelCase ) A_ = hidden_dropout_prob A_ = attention_dropout_prob A_ = use_stable_embedding A_ = shared_input_output_embedding A_ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase , ) def __A ( self : Dict ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'''got {self.rope_scaling}''' ) A_ = self.rope_scaling.get("type" , UpperCAmelCase ) A_ = self.rope_scaling.get("factor" , UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(UpperCAmelCase , UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
86
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __a :Optional[Any] = 'src/transformers' __a :Tuple = 'docs/source/en/tasks' def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.readlines() # Find the start prompt. A_ = 0 while not lines[start_index].startswith(__UpperCamelCase ): start_index += 1 start_index += 1 A_ = start_index while not lines[end_index].startswith(__UpperCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __a :List[str] = direct_transformers_import(TRANSFORMERS_PATH) __a :Optional[Any] = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__a :Optional[Any] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = TASK_GUIDE_TO_MODELS[task_guide] A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() ) A_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ , A_ , A_ , A_ = _find_text_in_file( filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,) A_ = get_model_list_for_task(__UpperCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' " to fix this." ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a :Optional[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
86
1
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
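# Reproducibility sketch: the shuffle above uses the module-level RNG, so
# seeding it first makes the resulting permutation deterministic for tests.
random.seed(0)
print("Seeded FY Shuffle", fisher_yates_shuffle(list(range(8))))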
86
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __a :Dict = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A_ = "" else: A_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[ : config.hidden_size, : ] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[ -config.hidden_size :, : ] A_ = in_proj_bias[-config.hidden_size :] def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = dct.pop(__UpperCamelCase ) A_ = val def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = ViTConfig() A_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A_ = True A_ = int(vit_name[-12:-10] ) A_ = int(vit_name[-9:-6] ) else: A_ = 1000 A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} A_ = int(vit_name[-6:-4] ) A_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("tiny" ): A_ = 192 A_ = 768 A_ = 12 A_ = 3 elif vit_name[9:].startswith("small" ): A_ = 384 A_ = 1536 A_ = 12 A_ = 6 else: pass else: if vit_name[4:].startswith("small" ): A_ = 768 A_ = 2304 A_ = 8 A_ = 8 elif vit_name[4:].startswith("base" ): pass elif vit_name[4:].startswith("large" ): A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 elif vit_name[4:].startswith("huge" ): A_ = 1280 A_ = 5120 A_ = 32 A_ = 16 # load original model from timm A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ = timm_model.state_dict() if base_model: remove_classification_head_(__UpperCamelCase ) A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": A_ = ViTModel(__UpperCamelCase ).eval() else: A_ = ViTForImageClassification(__UpperCamelCase ).eval() model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A_ = DeiTImageProcessor(size=config.image_size ) else: A_ = ViTImageProcessor(size=config.image_size ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ) A_ = encoding["pixel_values"] A_ = model(__UpperCamelCase ) if base_model: A_ = timm_model.forward_features(__UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 ) else: A_ 
= timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __a :Optional[int] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
86
1
__a :Tuple = '0.21.0' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
86
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` unit cells can be filled with unit tiles
    and coloured tiles of lengths two, three and four.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
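# Sanity-check sketch: for small rows the count matches the direct recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1, e.g. f(5) = 15.
assert solution(5) == 15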
86
1
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, using O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
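# Cross-check sketch against the closed form n! / (r! * (n - r)!), available
# in the standard library as math.comb since Python 3.8:
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252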
86
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __a :List[str] = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , **UpperCAmelCase : List[str] ): super().__init__(**UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type(UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ): if "text_queries" in kwargs: A_ = kwargs.pop("text_queries" ) if isinstance(UpperCAmelCase , (str, Image.Image) ): A_ = {"image": image, "candidate_labels": candidate_labels} else: A_ = image A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase ) return results def __A ( self : int , **UpperCAmelCase : Tuple ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] if "top_k" in kwargs: A_ = kwargs["top_k"] return {}, {}, postprocess_params def __A ( self : List[str] , UpperCAmelCase : Dict ): A_ = load_image(inputs["image"] ) A_ = inputs["candidate_labels"] if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = candidate_labels.split("," ) A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCAmelCase ): A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework ) A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __A ( self : str , UpperCAmelCase : int ): A_ = model_inputs.pop("target_size" ) A_ = model_inputs.pop("candidate_label" ) A_ = model_inputs.pop("is_last" ) A_ = self.model(**UpperCAmelCase ) A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ): A_ = [] for model_output in model_outputs: A_ = model_output["candidate_label"] A_ = BaseModelOutput(UpperCAmelCase ) A_ = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): A_ = outputs["scores"][index].item() A_ = self._get_bounding_box(outputs["boxes"][index][0] ) A_ = {"score": score, "label": label, "box": box} results.append(UpperCAmelCase ) A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase ) if top_k: A_ = results[:top_k] return results def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = LEDTokenizer _lowerCamelCase : str = LEDTokenizerFast _lowerCamelCase : List[Any] = True def __A ( self : Optional[int] ): super().setUp() A_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] A_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) A_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] A_ = {"unk_token": "<unk>"} A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase ) ) def __A ( self : Optional[Any] , **UpperCAmelCase : Optional[int] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Tuple , **UpperCAmelCase : int ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : Dict ): return "lower newer", "lower newer" @cached_property def __A ( self : Any ): return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def __A ( self : Union[str, Any] ): return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def __A ( self : str ): A_ = ["A long paragraph for summarization.", "Another paragraph for summarization."] A_ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A_ = tokenizer(UpperCAmelCase , max_length=len(UpperCAmelCase ) , padding=UpperCAmelCase , return_tensors="pt" ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) A_ = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_torch def __A ( self : Optional[int] ): A_ = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" ) self.assertIn("input_ids" , UpperCAmelCase ) self.assertIn("attention_mask" , UpperCAmelCase ) self.assertNotIn("labels" , UpperCAmelCase ) self.assertNotIn("decoder_attention_mask" , UpperCAmelCase ) @require_torch def __A ( self : Tuple ): A_ = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A_ = tokenizer(text_target=UpperCAmelCase , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def __A ( self : List[Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A_ = tokenizer( ["I am a small frog" * 
1024, "I am a small frog"] , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def __A ( self : Any ): A_ = ["A long paragraph for summarization."] A_ = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A_ = tokenizer(UpperCAmelCase , return_tensors="pt" ) A_ = tokenizer(text_target=UpperCAmelCase , return_tensors="pt" ) A_ = inputs["input_ids"] A_ = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __A ( self : List[str] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A_ = ["Summary of the text.", "Another summary."] A_ = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] A_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase ) A_ = [[0] * len(UpperCAmelCase ) for x in encoded_output["input_ids"]] A_ = tokenizer.pad(UpperCAmelCase ) self.assertSequenceEqual(outputs["global_attention_mask"] , UpperCAmelCase ) def __A ( self : str ): pass def __A ( self : Tuple ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = "A, <mask> AllenNLP sentence." A_ = tokenizer_r.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) A_ = tokenizer_p.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) A_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) A_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) __a :int = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } __a :Tuple = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: A_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ): """simple docstring""" A_ = torch.load(__UpperCamelCase ) A_ = WavLMConfigOrig(checkpoint["cfg"] ) A_ = WavLMOrig(__UpperCamelCase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: A_ = WavLMConfig.from_pretrained(__UpperCamelCase ) else: A_ = WavLMConfig() A_ = WavLMModel(__UpperCamelCase ) recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ) hf_wavlm.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __a :Optional[int] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
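# A hedged example of driving the conversion entry point programmatically
# instead of through the argparse CLI above. The keyword name mirrors the CLI
# flag and is an assumption (the original signature is positional), and the
# paths are placeholders.
#
#     convert_wavlm_checkpoint(
#         "./WavLM-Base.pt",         # --checkpoint_path (fairseq checkpoint)
#         "./wavlm-base-converted",  # --pytorch_dump_folder_path
#         config_path=None,          # --config_path; None uses the default WavLMConfig
#     )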
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without clashing
    with its row, its column, or its 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if the
    grid has no solution."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass bubbles the largest remaining element
    to the end of the unsorted prefix, stopping early once a pass makes no
    swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i + 1], list_data[i] = list_data[i], list_data[i + 1]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
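# Illustrative checks for the recursive bubble sort above; the inputs are
# arbitrary examples added here, not part of the original file.
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([]) == []
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]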
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right corner of a
    0/1 grid (1 = blocked cell) by exhaustive depth-first search with
    backtracking."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
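# A small worked example for the path counter above (added for illustration,
# with grids chosen arbitrarily). In a fully open 2x2 grid there are exactly
# two simple paths from the top-left to the bottom-right corner; blocking the
# top-right cell leaves only one.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2
assert depth_first_search([[0, 1], [0, 0]], 0, 0, set()) == 1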
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative base-10 integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically num //= 8, dropping any remainder
    # This formatting removes a trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
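# A quick cross-check of decimal_to_octal against the built-in oct(); the
# test values mirror the demo in main() above.
for value in (2, 8, 65, 216, 512):
    assert decimal_to_octal(value) == oct(value)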
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a :List[str] = logging.get_logger(__name__) __a :Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __a :Any = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ = None for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True elif name.split("." )[0] == "proj": A_ = fairseq_model.proj A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) return proj_weight def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ , A_ = emb.weight.shape A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase ) A_ = emb.weight.data return lin_layer def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.split(" " )[0] for line in lines] A_ = len(__UpperCamelCase ) A_ = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,): """simple docstring""" A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase ) A_ = SpeechaTextaConfig.from_pretrained( __UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase ) A_ = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) A_ , A_ , A_ = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ = model[0].eval() # set weights for wav2vec2 encoder A_ = WavaVecaModel(__UpperCamelCase ) A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase ) A_ = SpeechaTextaForCausalLM(__UpperCamelCase ) A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase ) A_ = False # add projection layer A_ = nn.Parameter(projection_layer.weight ) A_ = nn.Parameter(projection_layer.bias ) A_ = create_vocab_dict(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) ) tokenizer.save_pretrained(__UpperCamelCase ) A_ = hf_wavavec.config.to_dict() A_ = tokenizer.pad_token_id A_ = tokenizer.bos_token_id A_ = tokenizer.eos_token_id A_ = "speech_to_text_2" A_ = "wav2vec2" A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) feature_extractor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __a :Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __a :int = 'CompVis/stable-diffusion-v1-1' __a :Optional[int] = 'CompVis/stable-diffusion-v1-2' __a :List[Any] = 'CompVis/stable-diffusion-v1-3' __a :int = 'CompVis/stable-diffusion-v1-4' class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : StableDiffusionSafetyChecker , UpperCAmelCase : CLIPImageProcessor , UpperCAmelCase : bool = True , ): super()._init_() A_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase ) A_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase ) A_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase ) A_ = StableDiffusionPipeline( vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , requires_safety_checker=UpperCAmelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __A ( self : Union[str, Any] ): return {k: getattr(self , UpperCAmelCase ) for k in self.config.keys() if not k.startswith("_" )} def __A ( self : str , UpperCAmelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase ) def __A ( self : List[Any] ): self.enable_attention_slicing(UpperCAmelCase ) @torch.no_grad() def __A ( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Any , ): return self.pipea( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) @torch.no_grad() def __A ( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : 
Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Dict , ): return self.pipea( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) @torch.no_grad() def __A ( self : Dict , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Tuple , ): return self.pipea( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) @torch.no_grad() def __A ( self : List[str] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Tuple , ): return self.pipea( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) @torch.no_grad() def __A ( self : str , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] 
= None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Any , ): A_ = "cuda" if torch.cuda.is_available() else "cpu" self.to(UpperCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 A_ = self.textaimg_sda_a( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 A_ = self.textaimg_sda_a( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 A_ = self.textaimg_sda_a( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 A_ = self.textaimg_sda_a( prompt=UpperCAmelCase , height=UpperCAmelCase , width=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , output_type=UpperCAmelCase , return_dict=UpperCAmelCase , callback=UpperCAmelCase , callback_steps=UpperCAmelCase , **UpperCAmelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __a :str = logging.get_logger(__name__) __a :Any = Dict[str, Any] __a :int = List[Prediction] @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __A ( self : str , **UpperCAmelCase : str ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return super().__call__(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Any ): A_ = load_image(UpperCAmelCase ) A_ = torch.IntTensor([[image.height, image.width]] ) A_ = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) A_ = target_size return inputs def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = model_inputs.pop("target_size" ) A_ = self.model(**UpperCAmelCase ) A_ = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: A_ = model_inputs["bbox"] return model_outputs def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ): A_ = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A_ , A_ = target_size[0].tolist() def unnormalize(UpperCAmelCase : Any ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )] A_ = ["score", "label", "box"] A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = raw_annotations[0] A_ = raw_annotation["scores"] A_ = raw_annotation["labels"] A_ = raw_annotation["boxes"] A_ = scores.tolist() A_ = [self.model.config.idalabel[label.item()] for label in labels] A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A_ = ["score", "label", "box"] A_ = [ dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
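# A minimal usage sketch for the object detection pipeline above. It is not
# part of the original module: it assumes the public `transformers.pipeline`
# factory and the "facebook/detr-resnet-50" checkpoint, and the image path is
# a placeholder.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for prediction in detector("street_scene.jpg", threshold=0.9):
        # e.g. {"score": 0.99, "label": "car", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
        print(prediction["label"], prediction["score"], prediction["box"])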
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , snake_case_ , ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = RobertaConfig _lowerCamelCase : Dict = 'roberta' def __init__( self : Tuple , UpperCAmelCase : Tuple ): super().__init__(UpperCAmelCase ) A_ = RobertaEmbeddings(UpperCAmelCase ) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , snake_case_ , ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : str = RobertaConfig _lowerCamelCase : List[str] = 'roberta' def __init__( self : List[str] , UpperCAmelCase : List[Any] ): super().__init__(UpperCAmelCase ) A_ = config.num_labels A_ = config.num_hidden_layers A_ = DeeRobertaModel(UpperCAmelCase ) A_ = nn.Dropout(config.hidden_dropout_prob ) A_ = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=-1 , UpperCAmelCase : Optional[int]=False , ): A_ = self.num_layers try: A_ = self.roberta( UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , position_ids=UpperCAmelCase , head_mask=UpperCAmelCase , inputs_embeds=UpperCAmelCase , ) A_ = outputs[1] A_ = self.dropout(UpperCAmelCase ) A_ = self.classifier(UpperCAmelCase ) A_ = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: A_ = e.message A_ = e.exit_layer A_ = outputs[0] if not self.training: A_ = entropy(UpperCAmelCase ) A_ = [] A_ = [] if labels is not None: if self.num_labels == 1: # We are doing regression A_ = MSELoss() A_ = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: A_ = CrossEntropyLoss() A_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits A_ = [] for highway_exit in outputs[-1]: A_ = highway_exit[0] if not self.training: highway_logits_all.append(UpperCAmelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression A_ = MSELoss() A_ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: A_ = CrossEntropyLoss() A_ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCAmelCase ) if train_highway: A_ = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: A_ = (loss,) + outputs if not self.training: A_ = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: A_ = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ , A_ = image.size A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image[None].transpose(0 ,3 ,1 ,2 ) A_ = torch.from_numpy(__UpperCamelCase ) return 2.0 * image - 1.0 class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = 1 elif isinstance(UpperCAmelCase , torch.Tensor ): A_ = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' ) if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = preprocess(UpperCAmelCase ) A_ , A_ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image A_ = (batch_size, self.unet.config.in_channels // 2, height, width) A_ = next(self.unet.parameters() ).dtype A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) A_ = image.to(device=self.device , dtype=UpperCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCAmelCase , device=self.device ) A_ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta for t in self.progress_bar(UpperCAmelCase ): # concat latents and low resolution image in the channel dimension. 
A_ = torch.cat([latents, image] , dim=1 ) A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # decode the image latents with the VQVAE A_ = self.vqvae.decode(UpperCAmelCase ).sample A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 ) A_ = image / 2 + 0.5 A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
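# A hedged usage sketch for the latent-diffusion super-resolution pipeline
# above. It is not part of the original module: it assumes the public
# diffusers wrapper `LDMSuperResolutionPipeline` and the checkpoint
# "CompVis/ldm-super-resolution-4x-openimages", and the input image path is a
# placeholder.
if __name__ == "__main__":
    import PIL.Image
    from diffusers import LDMSuperResolutionPipeline

    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = PIL.Image.open("low_res_input.png").convert("RGB")
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")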
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = RoCBertTokenizer _lowerCamelCase : str = None _lowerCamelCase : Dict = False _lowerCamelCase : Tuple = True _lowerCamelCase : List[str] = filter_non_english def __A ( self : List[Any] ): super().setUp() A_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] A_ = {} A_ = {} for i, value in enumerate(UpperCAmelCase ): A_ = i A_ = i A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase ) def __A ( self : Dict ): A_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A_ = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] ) def __A ( self : Optional[Any] ): A_ = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __A ( self : Optional[int] ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __A ( self : Dict ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __A ( self : Optional[Any] ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __A ( self : Dict ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __A ( self : List[str] ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __A ( self : int ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __A ( self : Optional[Any] ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __A ( self : List[str] ): A_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __A ( self : Optional[Any] ): A_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] A_ = {} for i, token in enumerate(UpperCAmelCase ): A_ = i A_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __A ( self : Dict ): self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __A ( self : Any ): self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __A ( self : Dict ): self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __A ( self : List[str] ): A_ = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: A_ = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def __A ( self : List[str] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' A_ = tokenizer_r.encode_plus( UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , ) A_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , "do_lower_case" ) else False A_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __A ( self : Tuple ): A_ = ["的", "人", "有"] A_ = "".join(UpperCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = True A_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase ) A_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = False A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase ) A_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". 
A_ = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCAmelCase ) ] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : List[str] ): A_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A_ = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def __A ( self : Any ): A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): A_ = "你好,你是谁" A_ = tokenizer.tokenize(UpperCAmelCase ) A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) A_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) A_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) A_ = tokenizer.prepare_for_model( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase )
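The wordpiece assertions above exercise greedy longest-match-first subword splitting over a ten-entry vocabulary. A self-contained sketch of that algorithm (the helper name `greedy_wordpiece` is hypothetical; the vocabulary is the one built in the test) reproduces the expected outputs:

```python
def greedy_wordpiece(token: str, vocab: set[str], unk: str = "[UNK]") -> list[str]:
    # Greedy longest-match-first: repeatedly take the longest vocab entry
    # that prefixes the remaining characters; non-initial pieces get "##".
    pieces: list[str] = []
    start = 0
    while start < len(token):
        end = len(token)
        match = None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]  # any unmatchable span makes the whole token unknown
        pieces.append(match)
        start = end
    return pieces


vocab = {"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"}
assert greedy_wordpiece("", vocab) == []
assert greedy_wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedX", vocab) == ["[UNK]"]
```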
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, with the chain member 58 being the one which, when declared first,
# gives the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain of ``number`` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending zeros does not change the digit-square sum, so
    # number * 10, number * 100, ... all share the same result.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
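The table-driven `next_number` above trades memory for speed by squaring five digits at a time. A naive per-digit version makes a convenient cross-check; `digit_square_sum` and `ends_at_89` below are illustrative helpers, not part of the original solution:

```python
def digit_square_sum(n: int) -> int:
    # Plain per-digit version of next_number, with no lookup table.
    return sum(int(d) ** 2 for d in str(n))


def ends_at_89(n: int) -> bool:
    # Every chain eventually reaches the fixed point 1 or the loop through 89.
    while n not in (1, 89):
        n = digit_square_sum(n)
    return n == 89


# Spot-check the table-driven helper against the naive version:
for n in (44, 85, 145, 9_999_999):
    assert next_number(n) == digit_square_sum(n)
print(ends_at_89(44), ends_at_89(85))  # False, True (44 -> ... -> 1, 85 -> 89)
```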
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
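`_LazyModule` defers the heavy submodule imports until an exported name is first touched. A simplified standalone sketch of that idea (an illustration only, not the transformers implementation) looks like this:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    # Minimal sketch: resolve attributes to their submodules on first access.
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```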
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :List[Any] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime up to and including ``num`` via the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Every multiple of a prime, starting from its square, is composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
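A couple of quick checks make the behavior concrete (78498, the number of primes below one million, is a well-known value):

```python
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(len(prime_sieve_eratosthenes(1_000_000)))  # 78498
```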
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __a :List[Any] = get_logger() __a :Optional[dict] = None class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): """simple docstring""" def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ): super().__init__(features=UpperCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) A_ = str(jax.devices()[0] ) A_ = jnp_array_kwargs @staticmethod def __A ( ): import jax return {str(UpperCAmelCase ): device for device in jax.devices()} def __A ( self : Optional[int] , UpperCAmelCase : int ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , UpperCAmelCase ) and column: if all( isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase , axis=0 ) return column def __A ( self : List[str] , UpperCAmelCase : str ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ = {} if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A_ = {"dtype": jnp.intaa} else: A_ = {"dtype": jnp.intaa} elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = np.asarray(UpperCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return 
jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def __A ( self : Any , UpperCAmelCase : Dict ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ): A_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase ) A_ = self.python_features_decoder.decode_row(UpperCAmelCase ) return self.recursive_tensorize(UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase ) A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] ) A_ = self.recursive_tensorize(UpperCAmelCase ) A_ = self._consolidate(UpperCAmelCase ) return column def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase ) A_ = self.python_features_decoder.decode_batch(UpperCAmelCase ) A_ = self.recursive_tensorize(UpperCAmelCase ) for column_name in batch: A_ = self._consolidate(batch[column_name] ) return batch
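The dtype logic in `_tensorize` above (the obfuscated `jnp.intaa`/`jnp.floataa` stand for `jnp.int32`/`jnp.int64`/`jnp.float32`, and `jax_enable_xaa` for `jax_enable_x64`) can be seen in isolation. A minimal sketch of the same default-precision rule, assuming JAX is installed:

```python
import jax
import jax.numpy as jnp
import numpy as np

value = np.array([1, 2, 3])  # an integer column, as Arrow hands it back
# Same rule as the formatter: int64 only when 64-bit mode is enabled.
default_dtype = {"dtype": jnp.int64 if jax.config.jax_enable_x64 else jnp.int32}
arr = jnp.array(value, **default_dtype)
print(arr.dtype)  # int32 under JAX's default 32-bit config
```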
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __a :Optional[Any] = 'src/transformers' __a :Tuple = 'docs/source/en/tasks' def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.readlines() # Find the start prompt. A_ = 0 while not lines[start_index].startswith(__UpperCamelCase ): start_index += 1 start_index += 1 A_ = start_index while not lines[end_index].startswith(__UpperCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __a :List[str] = direct_transformers_import(TRANSFORMERS_PATH) __a :Optional[Any] = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__a :Optional[Any] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = TASK_GUIDE_TO_MODELS[task_guide] A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() ) A_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ , A_ , A_ , A_ = _find_text_in_file( filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,) A_ = get_model_list_for_task(__UpperCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' " to fix this." ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a :Optional[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
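`_find_text_in_file` locates the block between a start prompt and an end prompt so the regenerated model list can be spliced back in. A minimal standalone sketch of the same splice pattern (hypothetical prompts, not the transformers helper itself):

```python
def replace_between(lines: list[str], start_prompt: str, end_prompt: str, new_block: str) -> list[str]:
    # Find the line after start_prompt and the line holding end_prompt,
    # then splice the regenerated block between them.
    start = 0
    while not lines[start].startswith(start_prompt):
        start += 1
    start += 1
    end = start
    while not lines[end].startswith(end_prompt):
        end += 1
    return lines[:start] + [new_block] + lines[end:]


doc = ["intro\n", "<!--start-->\n", "old list\n", "<!--end-->\n", "outro\n"]
print("".join(replace_between(doc, "<!--start-->", "<!--end-->", "new list\n")))
```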
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __a :Any = logging.getLogger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ): super().__init__( UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , ) A_ = None def __A ( self : Dict , UpperCAmelCase : int ): logger.info("initializing retrieval" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized" ) # needs to be set manually A_ = self._infer_socket_ifname() # avoid clash with the NCCL port A_ = str(distributed_port + 1 ) A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __A ( self : List[str] ): return dist.get_rank(group=self.process_group ) == 0 def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ): A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase ) dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group ) return target_tensor def __A ( self : Any ): A_ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase ) return ifname def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ): # single GPU training if not dist.is_initialized(): A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase ) # distributed training A_ = dist.get_world_size(group=self.process_group ) # gather logic A_ = None if self._is_main(): A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )] dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group ) # scatter logic A_ = question_hidden_states.shape[0] A_ = [] A_ = [] if self._is_main(): assert len(UpperCAmelCase ) == world_size A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase ) A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase )
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance; the sqrt is deferred until the very end."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force over all pairs; used for the small base cases."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Inside the strip, each point only needs to be compared with six neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # Base case: brute force small inputs.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # Recursion: split at the median x-coordinate.
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Candidates straddling the dividing line form a narrow vertical strip.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
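A randomized brute-force comparison is a cheap way to validate the divide-and-conquer result; this check is illustrative and not part of the original file:

```python
import random

random.seed(0)
pts = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(200)]

fast = closest_pair_of_points(pts, len(pts))
brute = min(
    euclidean_distance_sqr(a, b) for i, a in enumerate(pts) for b in pts[i + 1 :]
) ** 0.5
assert abs(fast - brute) < 1e-9
```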
from jiwer import compute_measures import datasets __a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' __a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def __A ( self : Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ): if concatenate_texts: return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"] else: A_ = 0 A_ = 0 for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ): A_ = compute_measures(UpperCAmelCase , UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + 
measures["hits"] return incorrect / total
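`jiwer.compute_measures` does the word-level Levenshtein alignment; the same corpus-level WER can be reproduced with a short dynamic program. The helper below is a standalone sketch (not the jiwer implementation) that recovers the 0.5 from the docstring example:

```python
def word_edits(reference: str, hypothesis: str) -> tuple[int, int]:
    # Classic edit-distance DP over words: d[i][j] is the minimum number of
    # substitutions, deletions, and insertions aligning ref[:i] with hyp[:j].
    ref, hyp = reference.split(), hypothesis.split()
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            d[i][j] = min(sub, d[i - 1][j] + 1, d[i][j - 1] + 1)
    return d[len(ref)][len(hyp)], len(ref)


preds = ["this is the prediction", "there is an other sample"]
refs = ["this is the reference", "there is another one"]

total_edits = total_words = 0
for pred, ref in zip(preds, refs):
    edits, words = word_edits(ref, pred)
    total_edits += edits
    total_words += words
print(total_edits / total_words)  # 0.5, matching the docstring example
```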
from collections.abc import Callable

import numpy as np


def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve y' = ode_func(x, y), y(x0) = y0 on [x0, x_end] with Heun's method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: an explicit Euler step.
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1])
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
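Heun's predictor-corrector scheme is second-order accurate; a quick check against dy/dx = y, whose exact solution is e^x:

```python
import numpy as np

# y(1) should be close to e for a small step size.
approx = heun_method(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1]
print(approx, abs(approx - np.e))  # error shrinks as O(step_size^2)
```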
class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ): A_ = None A_ = None A_ = graph self._normalize_graph(UpperCAmelCase , UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = None def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ): if sources is int: A_ = [sources] if sinks is int: A_ = [sinks] if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0: return A_ = sources[0] A_ = sinks[0] # make fake vertex if there are more # than one source or sink if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1: A_ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A_ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A_ = max_input_flow A_ = 0 A_ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A_ = max_input_flow A_ = size - 1 def __A ( self : str ): if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __A ( self : Tuple , UpperCAmelCase : List[Any] ): A_ = algorithm(self ) class _a : """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : List[str] ): A_ = flow_network A_ = flow_network.verticesCount A_ = flow_network.sourceIndex A_ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A_ = flow_network.graph A_ = False def __A ( self : Optional[int] ): if not self.executed: self._algorithm() A_ = True def __A ( self : Dict ): pass class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ): super().__init__(UpperCAmelCase ) # use this to save your result A_ = -1 def __A ( self : Tuple ): if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ): super().__init__(UpperCAmelCase ) A_ = [[0] * self.verticies_count for i in range(self.verticies_count )] A_ = [0] * self.verticies_count A_ = [0] * self.verticies_count def __A ( self : List[str] ): A_ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A_ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A_ = 0 while i < len(UpperCAmelCase ): A_ = vertices_list[i] A_ = self.heights[vertex_index] self.process_vertex(UpperCAmelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) ) A_ = 0 else: i += 1 A_ = sum(self.preflow[self.source_index] ) def __A ( self : List[str] , UpperCAmelCase : Dict ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(UpperCAmelCase , UpperCAmelCase ) self.relabel(UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ): A_ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A_ = self.heights[to_index] if min_height is not None: A_ = min_height + 1 if __name__ == "__main__": __a :Tuple = [0] __a :Tuple = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __a :List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __a :List[Any] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
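The push-relabel result can be sanity-checked against a much simpler shortest-augmenting-path algorithm. The sketch below is a standalone Edmonds-Karp (not part of the original file) run on the same example network:

```python
from collections import deque


def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    # Repeatedly find a shortest augmenting path with BFS and push the
    # bottleneck capacity along it until no augmenting path remains.
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return total
        # Find the bottleneck along the path, then augment.
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck  # negative flow acts as residual capacity
            v = u
        total += bottleneck


graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # 6; should agree with the push-relabel result above
```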
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
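The class above is a placeholder that errors only when actually used. A self-contained sketch of the metaclass pattern behind it (simplified, with a hypothetical `FancyScheduler` and a local `requires_backends`, not the diffusers implementations):

```python
def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class DummyObject(type):
    # Touching any class attribute of a dummy class reports the missing
    # backends instead of failing later with an obscure ImportError.
    def __getattr__(cls, name):
        requires_backends(cls, cls._backends)


class FancyScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


try:
    FancyScheduler()
except ImportError as e:
    print(e)  # FancyScheduler requires the following backends: torch, torchsde
```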
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :str = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : str = TypeVar("""T""") SCREAMING_SNAKE_CASE__ : int = TypeVar("""U""") class lowerCamelCase_ ( Generic[T, U] ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = key __magic_name__ :List[str] = val __magic_name__ :DoubleLinkedListNode[T, U] | None = None __magic_name__ :DoubleLinkedListNode[T, U] | None = None def __repr__( self ): """simple docstring""" return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCamelCase_ ( Generic[T, U] ): def __init__( self ): """simple docstring""" __magic_name__ :DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase ) __magic_name__ , __magic_name__ :Union[str, Any] = self.rear, self.head def __repr__( self ): """simple docstring""" __magic_name__ :Any = ['''DoubleLinkedList'''] __magic_name__ :Any = self.head while node.next is not None: rep.append(str(__lowerCAmelCase ) ) __magic_name__ :Optional[int] = node.next rep.append(str(self.rear ) ) return ",\n ".join(__lowerCAmelCase ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[Any] = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None __magic_name__ :str = node __magic_name__ :str = previous __magic_name__ :Dict = node __magic_name__ :Optional[Any] = self.rear def A ( self , __lowerCAmelCase ): """simple docstring""" if node.prev is None or node.next is None: return None __magic_name__ :str = node.next __magic_name__ :Any = node.prev __magic_name__ :int = None __magic_name__ :List[str] = None return node class lowerCamelCase_ ( Generic[T, U] ): a__ = {} def __init__( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :DoubleLinkedList[T, U] = DoubleLinkedList() __magic_name__ :Dict = capacity __magic_name__ :Union[str, Any] = 0 __magic_name__ :Dict = 0 __magic_name__ :Optional[Any] = 0 __magic_name__ :dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ): """simple docstring""" return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self , __lowerCAmelCase ): """simple docstring""" return key in self.cache def A ( self , __lowerCAmelCase ): """simple docstring""" # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 __magic_name__ :DoubleLinkedListNode[T, U] = self.cache[key] __magic_name__ :int = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(__lowerCAmelCase ) return node.val self.miss += 1 return None def A ( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity __magic_name__ :Union[str, Any] = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(__lowerCAmelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del 
self.cache[first_node.key] self.num_keys -= 1 __magic_name__ :List[Any] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value __magic_name__ :str = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list __magic_name__ :Any = value self.list.add(__lowerCAmelCase ) @classmethod def A ( cls , __lowerCAmelCase = 1_2_8 ): """simple docstring""" def cache_decorator_inner(__lowerCAmelCase ) -> Callable[..., U]: def cache_decorator_wrapper(*__lowerCAmelCase ) -> U: if func not in cls.decorator_function_to_instance_map: __magic_name__ :List[str] = LRUCache(__lowerCAmelCase ) __magic_name__ :Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: __magic_name__ :Optional[Any] = func(*__lowerCAmelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , __lowerCAmelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(__lowerCAmelCase , '''cache_info''' , __lowerCAmelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
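Assuming the final classmethod (obfuscated to `A` here) keeps its upstream name `decorator`, and that the class is `LRUCache` rather than the obfuscated `lowerCamelCase_`, usage looks like the sketch below; the cache turns the naive exponential recursion into a linear-time computation:

```python
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(30))           # 832040, linear time thanks to memoization
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)
```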
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A_ = f'''{src_lang}-{tgt_lang}''' A_ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = os.path.join(__UpperCamelCase ,"README.md" ) print(f'''Generating {path}''' ) with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f: f.write(__UpperCamelCase ) # make sure we are under the root of the project __a :Optional[Any] = Path(__file__).resolve().parent.parent.parent __a :Optional[Any] = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __a , __a , __a :int = model_name.split('-') __a :str = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class __lowerCamelCase (_a ): _lowercase = (DPMSolverSDEScheduler,) _lowercase = 10 def snake_case_ ( self: Tuple,**A_: List[str] ): '''simple docstring''' __UpperCamelCase = { 'num_train_timesteps': 1100, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'noise_sampler_seed': 0, } config.update(**A_ ) return config def snake_case_ ( self: List[Any] ): '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def snake_case_ ( self: str ): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1],[0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=A_,beta_end=A_ ) def snake_case_ ( self: Dict ): '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=A_ ) def snake_case_ ( self: List[str] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __UpperCamelCase = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) __UpperCamelCase = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __UpperCamelCase = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3 else: assert abs(result_sum.item() - 
1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3 def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps,device=A_ ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter.to(A_ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**A_,use_karras_sigmas=A_ ) scheduler.set_timesteps(self.num_inference_steps,device=A_ ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter.to(A_ ) * scheduler.init_noise_sigma __UpperCamelCase = sample.to(A_ ) for t in scheduler.timesteps: __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 else: assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
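Every test above follows the same denoising loop. A condensed sketch of that loop, using only the scheduler API the tests themselves exercise (the zero tensor stands in for a real UNet prediction, and the tensor shape is an arbitrary placeholder):

```python
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    noise_sampler_seed=0,
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet call
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```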
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : str = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['torch', 'transformers', 'onnx'] def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['torch', 'transformers', 'onnx'] def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : 
int ): requires_backends(cls , ["torch", "transformers", "onnx"] )
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : int=3 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : int=36 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : int=6 , __lowerCAmelCase : Tuple=6 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=10_00 , ) -> List[str]: _A = parent _A = batch_size _A = num_channels _A = image_size _A = patch_size _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = coordinate_size _A = shape_size _A = num_labels _A = num_choices _A = scope _A = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _A = text_seq_length _A = (image_size // patch_size) ** 2 + 1 _A = self.text_seq_length + self.image_seq_length def snake_case_ ( self : List[Any] ) -> Dict: _A = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _A = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) _A = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _A = bbox[i, j, 3] _A = bbox[i, j, 1] _A = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: _A = bbox[i, j, 2] _A = bbox[i, j, 0] _A = tmp_coordinate _A = tf.constant(__lowerCAmelCase ) _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.text_seq_length] 
) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _A = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: _A = TFLayoutLMvaModel(config=__lowerCAmelCase ) # text + image _A = model(__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase ) _A = model( __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , training=__lowerCAmelCase , ) _A = model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _A = model(__lowerCAmelCase , training=__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _A = model({'''pixel_values''': pixel_values} , training=__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def snake_case_ ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ) -> List[str]: _A = self.num_labels _A = TFLayoutLMvaForSequenceClassification(config=__lowerCAmelCase ) _A = model( __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = self.num_labels _A = TFLayoutLMvaForTokenClassification(config=__lowerCAmelCase ) _A = model( __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, 
self.num_labels) ) def snake_case_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Optional[int]: _A = 2 _A = TFLayoutLMvaForQuestionAnswering(config=__lowerCAmelCase ) _A = model( __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , training=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ ( self : Tuple ) -> Union[str, Any]: _A = self.prepare_config_and_inputs() ((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs _A = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( _A , _A , unittest.TestCase): """simple docstring""" a__ : Any = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) a__ : str = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) a__ : Tuple = False a__ : List[Any] = False a__ : int = False def snake_case_ ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: return True def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=False ) -> dict: _A = copy.deepcopy(__lowerCAmelCase ) if model_class in get_values(__lowerCAmelCase ): _A = { k: tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__lowerCAmelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__lowerCAmelCase ): _A = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__lowerCAmelCase ): _A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) _A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__lowerCAmelCase ): _A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__lowerCAmelCase ): _A = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def snake_case_ ( self : Any ) -> List[str]: _A = TFLayoutLMvaModelTester(self ) _A = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def snake_case_ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() def snake_case_ ( self : str ) -> int: _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__lowerCAmelCase ) if getattr(__lowerCAmelCase , '''hf_compute_loss''' , __lowerCAmelCase ): # The number of elements in the loss should be the same as the number of elements in the label _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase ) 
_A = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__lowerCAmelCase )[0] ] _A = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = prepared_for_class.pop('''input_ids''' ) _A = model(__lowerCAmelCase , **__lowerCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: _A = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: _A = -1_00 _A = tf.convert_to_tensor(__lowerCAmelCase ) _A = model(__lowerCAmelCase , **__lowerCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _A = model(__lowerCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase ) # Get keys that were added with the _prepare_for_class function _A = prepared_for_class.keys() - inputs_dict.keys() _A = inspect.signature(model.call ).parameters _A = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple _A = {0: '''input_ids'''} for label_key in label_keys: _A = signature_names.index(__lowerCAmelCase ) _A = label_key _A = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple _A = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: _A = prepared_for_class[value] _A = tuple(__lowerCAmelCase ) # Send to model _A = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def snake_case_ ( self : Tuple ) -> Optional[int]: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict ) -> int: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A = type self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : List[Any] ) -> Dict: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict ) -> List[str]: ( 
( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : List[Any] ) -> Optional[Any]: ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : Dict ) -> Union[str, Any]: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFLayoutLMvaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @cached_property def snake_case_ ( self : List[Any] ) -> Union[str, Any]: return LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase ) if is_vision_available() else None @slow def snake_case_ ( self : Union[str, Any] ) -> Any: _A = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' ).pixel_values _A = tf.constant([[1, 2]] ) _A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass _A = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase ) # verify the logits _A = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __lowerCAmelCase ) _A = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
2
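The tester above encodes, in its comment, that LayoutLMv3's sequence length equals text tokens + image patches + 1 CLS token. A minimal standalone sketch of that arithmetic (the helper name is ours, not from the test): def layoutlmv3_seq_length(text_seq_length: int, image_size: int, patch_size: int) -> int:
    # (image_size // patch_size) ** 2 patches, plus 1 for the CLS token
    image_seq_length = (image_size // patch_size) ** 2 + 1
    return text_seq_length + image_seq_length


# With the tester defaults (text_seq_length=7, image_size=4, patch_size=2): 7 + 4 + 1 = 12
assert layoutlmv3_seq_length(7, 4, 2) == 12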
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,) def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ): A_ = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase ) return config def __A ( self : Optional[Any] ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def __A ( self : Dict ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase ) def __A ( self : int ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase ) def __A ( self : Tuple ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase ) def __A ( self : int ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase ) def __A ( self : Union[str, Any] ): self.check_over_configs(thresholding=UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , ) def __A ( self : Optional[int] ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def __A ( self : Tuple ): for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = self.dummy_sample_deter + 0.1 A_ = self.dummy_sample_deter - 0.1 A_ = samplea.shape[0] A_ = torch.stack([samplea, samplea, samplea] , dim=0 ) A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase ) A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2 assert abs(result_mean.item() - 0.5_005 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config(prediction_type="v_prediction" ) A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def __A ( self : Union[str, Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase ) A_ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase ): if i == len(UpperCAmelCase ) - 1: A_ = -1 else: A_ = timesteps[i + 1] A_ = scheduler.previous_timestep(UpperCAmelCase ) A_ = prev_t.item() self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] A_ = len(UpperCAmelCase ) with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase )
86
0
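A hedged sketch of the "fixed_small" variance the scheduler test above asserts (_get_variance(0) ~ 0, _get_variance(487) ~ 0.00979, _get_variance(999) ~ 0.02); this is the standard DDPM posterior variance under the test's linear beta schedule, computed here from first principles rather than through diffusers: import torch

betas = torch.linspace(0.0001, 0.02, 1000)  # "linear" schedule from the test config
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def fixed_small_variance(t: int) -> torch.Tensor:
    # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_prod_t_prev) / (1 - alphas_cumprod[t])


print(float(fixed_small_variance(0)))    # ~0.0
print(float(fixed_small_variance(487)))  # ~0.00979
print(float(fixed_small_variance(999)))  # ~0.02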
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer lowerCAmelCase : Tuple = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast lowerCAmelCase : Optional[int] = TaTokenizerFast lowerCAmelCase : Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ 'MT5EncoderModel', 'MT5ForConditionalGeneration', 'MT5ForQuestionAnswering', 'MT5Model', 'MT5PreTrainedModel', 'MT5Stack', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model'] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys lowerCAmelCase : Tuple = _LazyModule( __name__, globals()['__file__'], _import_structure, extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast}, module_spec=__spec__, )
3
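The MT5 __init__ above routes everything through _LazyModule. A generic, hedged sketch of that pattern (simplified; the real _LazyModule in transformers also handles dummy objects, TYPE_CHECKING, and extra_objects): import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing heavy submodules until an attribute is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)

Installing an instance of this class in sys.modules[__name__] from a package __init__ makes `from package import SomeClass` trigger the import of only the one submodule that defines SomeClass.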
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ): """simple docstring""" with open(__UpperCamelCase ) as metadata_file: A_ = json.load(__UpperCamelCase ) A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] ) # Load in the weights from the checkpoint_path A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) # Load the entity vocab file A_ = load_entity_vocab(__UpperCamelCase ) A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ) # Initialize the embeddings of the special tokens A_ = state_dict["embeddings.word_embeddings.weight"] A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) A_ = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: A_ = f'''encoder.layer.{layer_index}.attention.self.''' A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks A_ = state_dict["entity_embeddings.entity_embeddings.weight"] A_ = entity_emb[entity_vocab["[MASK]"]] A_ = LukeModel(config=__UpperCamelCase ).eval() A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' ) # Check outputs A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" ) A_ = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." 
) A_ = (39, 42) A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" ) A_ = model(**__UpperCamelCase ) # Verify word hidden states if model_size == "large": A_ = torch.Size((1, 42, 1024) ) A_ = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base A_ = torch.Size((1, 42, 768) ) A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": A_ = torch.Size((1, 1, 1024) ) A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base A_ = torch.Size((1, 1, 768) ) A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__UpperCamelCase ) ) model.save_pretrained(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = {} with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: for index, line in enumerate(__UpperCamelCase ): A_ , A_ = line.rstrip().split("\t" ) A_ = index return entity_vocab if __name__ == "__main__": __a :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) __a :Tuple = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
86
0
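A hedged sketch of the special-token registration step the LUKE converter above performs: register <ent> and <ent2> as additional special tokens and grow the vocabulary by two (the tokenizer checkpoint here is illustrative, not from this file): from transformers import RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
ent = AddedToken("<ent>", lstrip=False, rstrip=False)
ent2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
tokenizer.add_special_tokens({"additional_special_tokens": [ent, ent2]})
print(len(tokenizer))  # original vocab size + 2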
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" super().__init__(*_snake_case , **_snake_case ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCamelCase__ ( self , _snake_case=None , _snake_case=None , _snake_case=None ): """simple docstring""" lowerCAmelCase = {} lowerCAmelCase = {} if prompt is not None: lowerCAmelCase = prompt if generate_kwargs is not None: lowerCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowerCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) lowerCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _snake_case , **_snake_case ): """simple docstring""" return super().__call__(_snake_case , **_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = load_image(_snake_case ) if prompt is not None: if not isinstance(_snake_case , _snake_case ): raise ValueError( F'Received an invalid text input, got - {type(_snake_case )} - but expected a single string. ' 'Note also that one single text can be provided for conditional image to text generation.' 
) lowerCAmelCase = self.model.config.model_type if model_type == "git": lowerCAmelCase = self.image_processor(images=_snake_case , return_tensors=self.framework ) lowerCAmelCase = self.tokenizer(text=_snake_case , add_special_tokens=_snake_case ).input_ids lowerCAmelCase = [self.tokenizer.cls_token_id] + input_ids lowerCAmelCase = torch.tensor(_snake_case ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": lowerCAmelCase = self.image_processor(images=_snake_case , header_text=_snake_case , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowerCAmelCase = self.image_processor(images=_snake_case , return_tensors=self.framework ) lowerCAmelCase = self.tokenizer(_snake_case , return_tensors=self.framework ) model_inputs.update(_snake_case ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: lowerCAmelCase = self.image_processor(images=_snake_case , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowerCAmelCase = None return model_inputs def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _snake_case ) and all(x is None for x in model_inputs['input_ids'] ) ): lowerCAmelCase = None if generate_kwargs is None: lowerCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowerCAmelCase = model_inputs.pop(self.model.main_input_name ) lowerCAmelCase = self.model.generate(_snake_case , **_snake_case , **_snake_case ) return model_outputs def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = [] for output_ids in model_outputs: lowerCAmelCase = { 'generated_text': self.tokenizer.decode( _snake_case , skip_special_tokens=_snake_case , ) } records.append(_snake_case ) return records
4
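A hedged usage sketch of the image-to-text pipeline class defined above, via the high-level pipeline factory; the checkpoint name is illustrative and not taken from this file: from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# e.g. [{'generated_text': 'two cats sleeping on a couch'}]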
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __a :Optional[Any] = 'true' def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ): """simple docstring""" set_seed(42 ) A_ = RegressionModel() A_ = deepcopy(__UpperCamelCase ) A_ = RegressionDataset(length=__UpperCamelCase ) A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase ) model.to(accelerator.device ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return model, ddp_model, dataloader def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ): """simple docstring""" A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) A_ = load_dataset("glue" ,"mrpc" ,split="validation" ) def tokenize_function(__UpperCamelCase : Optional[Any] ): A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ) return outputs with accelerator.main_process_first(): A_ = dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,) A_ = tokenized_datasets.rename_column("label" ,"labels" ) def collate_fn(__UpperCamelCase : Union[str, Any] ): if use_longest: return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" ) return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" ) return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ): """simple docstring""" A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase ) A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches ) A_ = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] for batch in dataloader: A_ , A_ = batch.values() with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ , A_ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A_ , A_ = [], [] for logit, targ in logits_and_targets: logits.append(__UpperCamelCase ) targs.append(__UpperCamelCase ) A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase ) return logits, targs def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ): """simple docstring""" A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) assert ( len(__UpperCamelCase ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}''' def __snake_case ( 
__UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ): """simple docstring""" A_ = evaluate.load("glue" ,"mrpc" ) A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase ) # First do baseline A_ , A_ , A_ = setup["no"] model.to(__UpperCamelCase ) model.eval() for batch in dataloader: batch.to(__UpperCamelCase ) with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] ) A_ = metric.compute() # Then do distributed A_ , A_ , A_ = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) A_ = batch["labels"] A_ , A_ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase ) A_ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def __snake_case ( ): """simple docstring""" A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(__UpperCamelCase ,__UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(__UpperCamelCase ,99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) A_ = Accelerator() test_torch_metrics(__UpperCamelCase ,512 ) accelerator.state._reset_state() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
86
0
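A conceptual, hedged sketch of why gather_for_metrics (used throughout the script above) exists: with several processes and a dataset length that does not divide evenly, the distributed sampler pads the last batch with duplicated samples, and the gathered tensors must be truncated back to the true dataset length before computing metrics: import torch

num_processes, dataset_len = 2, 5
per_process = [torch.tensor([0, 1, 2]), torch.tensor([3, 4, 0])]  # last sample is a duplicate pad
gathered = torch.cat(per_process)
truncated = gathered[:dataset_len]  # what gather_for_metrics returns, conceptually
print(truncated.tolist())  # [0, 1, 2, 3, 4]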
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
5
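A quick usage check for find_max above; the bounds are inclusive, and the range check also admits negative indices: nums = [3, 1, 4, 1, 5, 9, 2, 6]
assert find_max(nums, 0, len(nums) - 1) == 9
assert find_max(nums, -len(nums), -1) == 9  # negative indices work too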
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __a :Optional[Any] = 'src/transformers' __a :Tuple = 'docs/source/en/tasks' def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.readlines() # Find the start prompt. A_ = 0 while not lines[start_index].startswith(__UpperCamelCase ): start_index += 1 start_index += 1 A_ = start_index while not lines[end_index].startswith(__UpperCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __a :List[str] = direct_transformers_import(TRANSFORMERS_PATH) __a :Optional[Any] = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__a :Optional[Any] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = TASK_GUIDE_TO_MODELS[task_guide] A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() ) A_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ , A_ , A_ , A_ = _find_text_in_file( filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,) A_ = get_model_list_for_task(__UpperCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' " to fix this." ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a :Optional[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
86
0
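The _find_text_in_file helper above locates a start prompt and an end prompt and returns the lines between them. A simplified, hedged sketch of the same idea (without the blank-line trimming the real helper does): def find_text_between(lines: list[str], start_prompt: str, end_prompt: str) -> str:
    start = next(i for i, line in enumerate(lines) if line.startswith(start_prompt)) + 1
    end = next(i for i, line in enumerate(lines) if line.startswith(end_prompt))
    return "".join(lines[start:end])


demo = ["<!--start-->\n", "model A\n", "model B\n", "<!--end-->\n"]
print(find_text_between(demo, "<!--start-->", "<!--end-->"))  # "model A\nmodel B\n"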
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any]=None ): SCREAMING_SNAKE_CASE__ = None if token is not None: SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} SCREAMING_SNAKE_CASE__ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json() SCREAMING_SNAKE_CASE__ = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) SCREAMING_SNAKE_CASE__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ = requests.get(url + f'''&page={i + 2}''' , headers=UpperCamelCase__ ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: str=None ): SCREAMING_SNAKE_CASE__ = None if token is not None: SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} SCREAMING_SNAKE_CASE__ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json() SCREAMING_SNAKE_CASE__ = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) SCREAMING_SNAKE_CASE__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ = requests.get(url + f'''&page={i + 2}''' , headers=UpperCamelCase__ ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] ): SCREAMING_SNAKE_CASE__ = None if token is not None: SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ , allow_redirects=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = result.headers["""Location"""] SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , allow_redirects=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , f'''{artifact_name}.zip''' ) with open(UpperCamelCase__ , """wb""" ) as fp: fp.write(response.content ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Any=None ): SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = None with zipfile.ZipFile(UpperCamelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCamelCase__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(UpperCamelCase__ ) as f: for line in f: SCREAMING_SNAKE_CASE__ = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # 
`error_line` is the place where `error` occurs SCREAMING_SNAKE_CASE__ = line[: line.index(""": """ )] SCREAMING_SNAKE_CASE__ = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("""FAILED """ ): # `test` is the test method that failed SCREAMING_SNAKE_CASE__ = line[len("""FAILED """ ) :] failed_tests.append(UpperCamelCase__ ) elif filename == "job_name.txt": SCREAMING_SNAKE_CASE__ = line if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( f'''`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase__ )} for `errors` ''' f'''and {len(UpperCamelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' """ problem.""" ) SCREAMING_SNAKE_CASE__ = None if job_name and job_links: SCREAMING_SNAKE_CASE__ = job_links.get(UpperCamelCase__ , UpperCamelCase__ ) # A list with elements of the form (line of error, error, failed test) SCREAMING_SNAKE_CASE__ = [x + [y] + [job_link] for x, y in zip(UpperCamelCase__ , UpperCamelCase__ )] return result def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ): SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [os.path.join(UpperCamelCase__ , UpperCamelCase__ ) for p in os.listdir(UpperCamelCase__ ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(UpperCamelCase__ , job_links=UpperCamelCase__ ) ) return errors def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: str=None ): SCREAMING_SNAKE_CASE__ = Counter() counter.update([x[1] for x in logs] ) SCREAMING_SNAKE_CASE__ = counter.most_common() SCREAMING_SNAKE_CASE__ = {} for error, count in counts: if error_filter is None or error not in error_filter: SCREAMING_SNAKE_CASE__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} SCREAMING_SNAKE_CASE__ = dict(sorted(r.items() , key=lambda UpperCamelCase__ : item[1]["count"] , reverse=UpperCamelCase__ ) ) return r def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): SCREAMING_SNAKE_CASE__ = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): SCREAMING_SNAKE_CASE__ = test.split("""/""" )[2] else: SCREAMING_SNAKE_CASE__ = None return test def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ): SCREAMING_SNAKE_CASE__ = [(x[0], x[1], get_model(x[2] )) for x in logs] SCREAMING_SNAKE_CASE__ = [x for x in logs if x[2] is not None] SCREAMING_SNAKE_CASE__ = {x[2] for x in logs} SCREAMING_SNAKE_CASE__ = {} for test in tests: SCREAMING_SNAKE_CASE__ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) SCREAMING_SNAKE_CASE__ = counter.most_common() SCREAMING_SNAKE_CASE__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} SCREAMING_SNAKE_CASE__ = sum(error_counts.values() ) if n_errors > 0: SCREAMING_SNAKE_CASE__ = {"""count""": n_errors, """errors""": error_counts} SCREAMING_SNAKE_CASE__ = dict(sorted(r.items() , key=lambda UpperCamelCase__ : item[1]["count"] , reverse=UpperCamelCase__ ) ) return r def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): SCREAMING_SNAKE_CASE__ = """| no. 
| error | status |""" SCREAMING_SNAKE_CASE__ = """|-:|:-|:-|""" SCREAMING_SNAKE_CASE__ = [header, sep] for error in reduced_by_error: SCREAMING_SNAKE_CASE__ = reduced_by_error[error]["""count"""] SCREAMING_SNAKE_CASE__ = f'''| {count} | {error[:100]} | |''' lines.append(UpperCamelCase__ ) return "\n".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): SCREAMING_SNAKE_CASE__ = """| model | no. of errors | major error | count |""" SCREAMING_SNAKE_CASE__ = """|-:|-:|-:|-:|""" SCREAMING_SNAKE_CASE__ = [header, sep] for model in reduced_by_model: SCREAMING_SNAKE_CASE__ = reduced_by_model[model]["""count"""] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = list(reduced_by_model[model]["""errors"""].items() )[0] SCREAMING_SNAKE_CASE__ = f'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(UpperCamelCase__ ) return "\n".join(UpperCamelCase__ ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') _lowerCamelCase = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _lowerCamelCase = get_job_links(args.workflow_run_id, token=args.token) _lowerCamelCase = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: _lowerCamelCase = k.find(' / ') _lowerCamelCase = k[index + len(' / ') :] _lowerCamelCase = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _lowerCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _lowerCamelCase = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _lowerCamelCase = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _lowerCamelCase = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _lowerCamelCase = reduce_by_error(errors) _lowerCamelCase = reduce_by_model(errors) _lowerCamelCase = make_github_table(reduced_by_error) _lowerCamelCase = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
6
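A minimal sketch of the reduce-by-error aggregation the workflow script above builds, run on stand-in (error_line, error, failed_test) tuples: from collections import Counter

logs = [
    ("test_a.py:10", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_a"),
    ("test_b.py:22", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_b"),
    ("test_c.py:7", "ValueError", "tests/models/bert/test_modeling_bert.py::test_c"),
]
counter = Counter(error for _, error, _ in logs)
print(counter.most_common())  # [('AssertionError', 2), ('ValueError', 1)]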
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __a :Dict = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A_ = "" else: A_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[ : config.hidden_size, : ] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[ -config.hidden_size :, : ] A_ = in_proj_bias[-config.hidden_size :] def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = dct.pop(__UpperCamelCase ) A_ = val def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = ViTConfig() A_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A_ = True A_ = int(vit_name[-12:-10] ) A_ = int(vit_name[-9:-6] ) else: A_ = 1000 A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} A_ = int(vit_name[-6:-4] ) A_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("tiny" ): A_ = 192 A_ = 768 A_ = 12 A_ = 3 elif vit_name[9:].startswith("small" ): A_ = 384 A_ = 1536 A_ = 12 A_ = 6 else: pass else: if vit_name[4:].startswith("small" ): A_ = 768 A_ = 2304 A_ = 8 A_ = 8 elif vit_name[4:].startswith("base" ): pass elif vit_name[4:].startswith("large" ): A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 elif vit_name[4:].startswith("huge" ): A_ = 1280 A_ = 5120 A_ = 32 A_ = 16 # load original model from timm A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ = timm_model.state_dict() if base_model: remove_classification_head_(__UpperCamelCase ) A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": A_ = ViTModel(__UpperCamelCase ).eval() else: A_ = ViTForImageClassification(__UpperCamelCase ).eval() model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A_ = DeiTImageProcessor(size=config.image_size ) else: A_ = ViTImageProcessor(size=config.image_size ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ) A_ = encoding["pixel_values"] A_ = model(__UpperCamelCase ) if base_model: A_ = timm_model.forward_features(__UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 ) else: A_ 
= timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __a :Optional[int] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
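# Added illustration (not part of the original script): a minimal, hedged
# sketch of the fused-qkv split performed by `read_in_q_k_v` above. timm
# stores query/key/value as one (3*hidden, hidden) matrix, while the
# HuggingFace ViT expects three separate projections. `hidden_size` here is
# illustrative only.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv weight
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b = in_proj_bias[:hidden_size]
key_b = in_proj_bias[hidden_size : hidden_size * 2]
value_b = in_proj_bias[-hidden_size:]

# The three slices exactly reassemble the fused matrix, in q, k, v order.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)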
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger() @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : nn.Module UpperCAmelCase : List[nn.Module] = field(default_factory=__lowerCAmelCase ) UpperCAmelCase : list = field(default_factory=__lowerCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Tensor , _UpperCAmelCase : Tensor ): _A = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(_UpperCAmelCase ) def __call__( self : List[str] , _UpperCAmelCase : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def lowerCAmelCase_ ( self : Tuple ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : nn.Module UpperCAmelCase : nn.Module UpperCAmelCase : int = 0 UpperCAmelCase : List = field(default_factory=__lowerCAmelCase ) UpperCAmelCase : List = field(default_factory=__lowerCAmelCase ) def __call__( self : Dict , _UpperCAmelCase : Tensor ): _A = Tracker(self.dest )(_UpperCAmelCase ).parametrized _A = Tracker(self.src )(_UpperCAmelCase ).parametrized _A = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) ) _A = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise Exception( F'''Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while''' F''' destination module has {len(_UpperCAmelCase )}.''' ) for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _snake_case ( _snake_case : str , _snake_case : ResNetConfig , _snake_case : Path , _snake_case : bool = True ) -> Union[str, Any]: '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): _A = timm.create_model(_snake_case , pretrained=_snake_case ).eval() _A = ResNetForImageClassification(_snake_case ).eval() _A = ModuleTransfer(src=_snake_case , dest=_snake_case ) _A = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(_snake_case ) assert torch.allclose(from_model(_snake_case ) , our_model(_snake_case ).logits ), "The model logits don't match the original one." 
_A = F'''resnet{"-".join(name.split("resnet" ) )}''' print(_snake_case ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_snake_case , ) # we can use the convnext one _A = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_snake_case , ) print(F'''Pushed {checkpoint_name}''' ) def _snake_case ( _snake_case : Path , _snake_case : str = None , _snake_case : bool = True ) -> Tuple: '''simple docstring''' _A = 'imagenet-1k-id2label.json' _A = 10_00 _A = (1, num_labels) _A = 'huggingface/label-files' _A = num_labels _A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='dataset' ) , 'r' ) ) _A = {int(_snake_case ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} _A = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case ) _A = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(_snake_case , names_to_config[model_name] , _snake_case , _snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_snake_case , _snake_case , _snake_case , _snake_case ) return config, expected_shape if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) a = parser.parse_args() a = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
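# Added illustration: a minimal sketch of the forward-hook tracing idea behind
# `Tracker`/`ModuleTransfer` above — register a hook on every submodule, run
# one forward pass, and collect leaf modules in execution order. The toy model
# below is illustrative only.
import torch
from torch import nn

traced = []

def record_leaf(module, inputs, output):
    if len(list(module.children())) == 0:  # keep only leaf modules
        traced.append(module)

toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
handles = [m.register_forward_hook(record_leaf) for m in toy.modules()]
toy(torch.randn(1, 3, 32, 32))
for handle in handles:
    handle.remove()
print([type(m).__name__ for m in traced])  # ['Conv2d', 'BatchNorm2d', 'ReLU']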
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` unit cells using unit tiles
    and tiles of lengths 2, 3 and 4."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
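# Added sanity check (a sketch): for small rows the counts follow a
# tetranacci-style sequence, since a row of length n starts either with a
# unit cell or with a tile of length 2, 3 or 4.
assert [solution(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]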
import random


def rabin_miller(num: int) -> bool:
    # Miller-Rabin probabilistic primality test with 5 random witnesses.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
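# Added usage sketch: `rabin_miller` is probabilistic, but the trial division
# against `low_primes` filters most composites cheaply before it runs.
assert is_prime_low_num(997) is True  # caught by the low-primes table
assert is_prime_low_num(1000) is False  # divisible by 2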
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __a :List[str] = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , **UpperCAmelCase : List[str] ): super().__init__(**UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type(UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ): if "text_queries" in kwargs: A_ = kwargs.pop("text_queries" ) if isinstance(UpperCAmelCase , (str, Image.Image) ): A_ = {"image": image, "candidate_labels": candidate_labels} else: A_ = image A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase ) return results def __A ( self : int , **UpperCAmelCase : Tuple ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] if "top_k" in kwargs: A_ = kwargs["top_k"] return {}, {}, postprocess_params def __A ( self : List[str] , UpperCAmelCase : Dict ): A_ = load_image(inputs["image"] ) A_ = inputs["candidate_labels"] if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = candidate_labels.split("," ) A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCAmelCase ): A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework ) A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __A ( self : str , UpperCAmelCase : int ): A_ = model_inputs.pop("target_size" ) A_ = model_inputs.pop("candidate_label" ) A_ = model_inputs.pop("is_last" ) A_ = self.model(**UpperCAmelCase ) A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ): A_ = [] for model_output in model_outputs: A_ = model_output["candidate_label"] A_ = BaseModelOutput(UpperCAmelCase ) A_ = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): A_ = outputs["scores"][index].item() A_ = self._get_bounding_box(outputs["boxes"][index][0] ) A_ = {"score": score, "label": label, "box": box} results.append(UpperCAmelCase ) A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase ) if top_k: A_ = results[:top_k] return results def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
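# Added usage sketch for the pipeline above. The checkpoint name is an
# assumption (any OWL-ViT style zero-shot detection checkpoint should work),
# and running this downloads weights.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])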
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
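# Added illustration: a simplified sketch of the lazy-import pattern used by
# `_LazyModule` above (this is not the actual transformers implementation).
# Attribute access, not `import`, triggers loading of the submodule, so
# `import transformers` stays cheap even when optional backends are missing.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, objects in self._import_structure.items():
            if attr in objects:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")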
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) __a :int = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } __a :Tuple = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: A_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ): """simple docstring""" A_ = torch.load(__UpperCamelCase ) A_ = WavLMConfigOrig(checkpoint["cfg"] ) A_ = WavLMOrig(__UpperCamelCase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: A_ = WavLMConfig.from_pretrained(__UpperCamelCase ) else: A_ = WavLMConfig() A_ = WavLMModel(__UpperCamelCase ) recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ) hf_wavlm.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __a :Optional[int] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
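# Added illustration of the `*` wildcard convention in MAPPING above: the
# encoder layer index is recovered from the fairseq key and substituted into
# the HuggingFace key. The key below is illustrative.
name = "encoder.layers.7.self_attn.k_proj.weight"  # fairseq-style key
key = "self_attn.k_proj"
mapped_key = "encoder.layers.*.attention.k_proj"
layer_index = name.split(key)[0].split(".")[-2]
print(mapped_key.replace("*", layer_index))  # encoder.layers.7.attention.k_proj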
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
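# Added usage sketch: `complete_graph` is deterministic, so its output can be
# checked directly; `random_graph` draws each undirected edge independently
# with the given probability, e.g. random_graph(5, 0.5).
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}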
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # recurse on a one-shorter prefix until a full pass makes no swaps
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
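# Added usage sketch: each recursive call bubbles the largest remaining value
# to the end of the shrinking prefix, and the recursion stops as soon as a
# full pass makes no swaps.
assert bubble_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert bubble_sort([]) == []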
'''simple docstring''' import collections import os import re from pathlib import Path lowercase_ = "src/transformers" # Matches is_xxx_available() lowercase_ = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowercase_ = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase_ = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowercase_ = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowercase_ = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase_ = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowercase_ = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowercase_ = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowercase_ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowercase_ = re.compile(R"^\s*try:") # Catches a line with else: lowercase_ = re.compile(R"^\s*else:") def lowerCAmelCase (__A): """simple docstring""" if _re_test_backend.search(__A) is None: return None _a = [b[0] for b in _re_backend.findall(__A)] backends.sort() return "_and_".join(__A) def lowerCAmelCase (__A): """simple docstring""" with open(__A , '''r''' , encoding='''utf-8''' , newline='''\n''') as f: _a = f.readlines() _a = 0 while line_index < len(__A) and not lines[line_index].startswith('''_import_structure = {'''): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__A): return None # First grab the objects without a specific backend in _import_structure _a = [] while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None: _a = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__A): _a = _re_one_line_import_struct.search(__A).groups()[0] _a = re.findall(r'''\[([^\]]+)\]''' , __A) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''')]) line_index += 1 continue _a = _re_import_struct_key_value.search(__A) if single_line_import_search is not None: _a = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(__A) > 0] objects.extend(__A) elif line.startswith(''' ''' * 8 + '''"'''): objects.append(line[9:-3]) line_index += 1 _a = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING'''): # If the line is an if not is_backend_available, we grab all objects associated. 
_a = find_backend(lines[line_index]) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1]) is None: _a = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index]) is None: line_index += 1 line_index += 1 _a = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4): _a = lines[line_index] if _re_import_struct_add_one.search(__A) is not None: objects.append(_re_import_struct_add_one.search(__A).groups()[0]) elif _re_import_struct_add_many.search(__A) is not None: _a = _re_import_struct_add_many.search(__A).groups()[0].split(''', ''') _a = [obj[1:-1] for obj in imports if len(__A) > 0] objects.extend(__A) elif _re_between_brackets.search(__A) is not None: _a = _re_between_brackets.search(__A).groups()[0].split(''', ''') _a = [obj[1:-1] for obj in imports if len(__A) > 0] objects.extend(__A) elif _re_quote_object.search(__A) is not None: objects.append(_re_quote_object.search(__A).groups()[0]) elif line.startswith(''' ''' * 8 + '''"'''): objects.append(line[9:-3]) elif line.startswith(''' ''' * 12 + '''"'''): objects.append(line[13:-3]) line_index += 1 _a = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _a = [] while ( line_index < len(__A) and find_backend(lines[line_index]) is None and not lines[line_index].startswith('''else''') ): _a = lines[line_index] _a = _re_import.search(__A) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''')) elif line.startswith(''' ''' * 8): objects.append(line[8:-2]) line_index += 1 _a = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(__A): # If the line is an if is_backend_available, we grab all objects associated. 
_a = find_backend(lines[line_index]) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1]) is None: _a = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index]) is None: line_index += 1 line_index += 1 _a = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8): _a = lines[line_index] _a = _re_import.search(__A) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''')) elif line.startswith(''' ''' * 12): objects.append(line[12:-2]) line_index += 1 _a = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowerCAmelCase (__A , __A): """simple docstring""" def find_duplicates(__A): return [k for k, v in collections.Counter(__A).items() if v > 1] if list(import_dict_objects.keys()) != list(type_hint_objects.keys()): return ["Both sides of the init do not have the same backends!"] _a = [] for key in import_dict_objects.keys(): _a = find_duplicates(import_dict_objects[key]) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''') _a = find_duplicates(type_hint_objects[key]) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''') if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])): _a = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''') for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''') for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''') return errors def lowerCAmelCase (): """simple docstring""" _a = [] for root, _, files in os.walk(__A): if "__init__.py" in files: _a = os.path.join(__A , '''__init__.py''') _a = parse_init(__A) if objects is not None: _a = analyze_results(*__A) if len(__A) > 0: _a = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(__A)) if len(__A) > 0: raise ValueError('''\n\n'''.join(__A)) def lowerCAmelCase (): """simple docstring""" _a = [] for path, directories, files in os.walk(__A): for folder in directories: # Ignore private modules if folder.startswith('''_'''): directories.remove(__A) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__A) / folder).glob('''*.py'''))) == 0: continue _a = str((Path(__A) / folder).relative_to(__A)) _a = short_path.replace(os.path.sep , '''.''') submodules.append(__A) for fname in files: if fname == "__init__.py": continue _a = str((Path(__A) / fname).relative_to(__A)) _a = short_path.replace('''.py''' , '''''').replace(os.path.sep , '''.''') if len(submodule.split('''.''')) == 1: submodules.append(__A) return submodules lowercase_ = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def lowerCAmelCase (): """simple docstring""" from transformers.utils import direct_transformers_import _a = direct_transformers_import(__A) _a = set(transformers._import_structure.keys()) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all 
of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__A , '''__init__.py''') , '''r''') as f: _a = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , __A))) _a = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__A) > 0: _a = '''\n'''.join(F'''- {module}''' for module in module_not_registered) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''') if __name__ == "__main__": check_all_inits() check_submodules()
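# Added illustration of the backend-detection idea in `find_backend` above:
# every `is_xxx_available` guard on the line is extracted and the backend
# names are joined with "_and_". This sketch uses a simplified regex.
import re

backend_re = re.compile(r"is\_([a-z_]*)_available")
line = "if not (is_torch_available() and is_vision_available()):"
backends = sorted(backend_re.findall(line))
print("_and_".join(backends))  # torch_and_vision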
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
def get_set_bits_count(number: int) -> int:
    """Return the number of set bits in the binary representation of a
    non-negative integer."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
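# Added usage sketch: 11 is 0b1011, which has three set bits.
assert get_set_bits_count(11) == 3
assert get_set_bits_count(0) == 0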
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a :List[str] = logging.get_logger(__name__) __a :Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __a :Any = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ = None for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True elif name.split("." )[0] == "proj": A_ = fairseq_model.proj A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) return proj_weight def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ , A_ = emb.weight.shape A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase ) A_ = emb.weight.data return lin_layer def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.split(" " )[0] for line in lines] A_ = len(__UpperCamelCase ) A_ = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,): """simple docstring""" A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase ) A_ = SpeechaTextaConfig.from_pretrained( __UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase ) A_ = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) A_ , A_ , A_ = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ = model[0].eval() # set weights for wav2vec2 encoder A_ = WavaVecaModel(__UpperCamelCase ) A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase ) A_ = SpeechaTextaForCausalLM(__UpperCamelCase ) A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase ) A_ = False # add projection layer A_ = nn.Parameter(projection_layer.weight ) A_ = nn.Parameter(projection_layer.bias ) A_ = create_vocab_dict(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) ) tokenizer.save_pretrained(__UpperCamelCase ) A_ = hf_wavavec.config.to_dict() A_ = tokenizer.pad_token_id A_ = tokenizer.bos_token_id A_ = tokenizer.eos_token_id A_ = "speech_to_text_2" A_ = "wav2vec2" A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) feature_extractor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __a :Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
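# Added illustration of the embedding-to-linear trick used by the conversion
# above: the decoder's output projection is tied to an embedding matrix by
# copying its weight into a bias-free Linear layer. Sizes are illustrative.
import torch
from torch import nn

emb = nn.Embedding(10, 4)                 # (vocab_size, hidden_size)
lin_layer = nn.Linear(4, 10, bias=False)  # hidden -> vocab logits
lin_layer.weight.data = emb.weight.data   # shapes (10, 4) match exactly
hidden_states = torch.randn(1, 4)
print(lin_layer(hidden_states).shape)     # torch.Size([1, 10])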
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : str = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) __lowerCamelCase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) sd_pipe.set_scheduler('sample_euler' ) __lowerCamelCase : int = 'A painting of a squirrel eating a burger' __lowerCamelCase : Tuple = torch.manual_seed(0 ) __lowerCamelCase : Tuple = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) __lowerCamelCase : Union[str, Any] = output.images __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase : List[str] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __lowerCamelCase : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) sd_pipe.set_scheduler('sample_euler' ) __lowerCamelCase : Union[str, Any] = 'A painting of a squirrel eating a burger' __lowerCamelCase : Any = torch.manual_seed(0 ) __lowerCamelCase : List[str] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) __lowerCamelCase : List[str] = output.images __lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase : Tuple = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __lowerCamelCase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) __lowerCamelCase : Any = 'A painting of a squirrel eating a burger' __lowerCamelCase : Optional[Any] = torch.manual_seed(0 ) __lowerCamelCase : Optional[int] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[Any] = output.images __lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase : List[Any] = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
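# Added usage sketch mirroring the slow tests above: k-diffusion samplers are
# selected by name via `set_scheduler`. Running this downloads the checkpoint
# and assumes a CUDA device.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
pipe.set_scheduler("sample_euler")
generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=9.0,
    num_inference_steps=20,
    output_type="np",
).images[0]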
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __a :str = logging.get_logger(__name__) __a :Any = Dict[str, Any] __a :int = List[Prediction] @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __A ( self : str , **UpperCAmelCase : str ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return super().__call__(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Any ): A_ = load_image(UpperCAmelCase ) A_ = torch.IntTensor([[image.height, image.width]] ) A_ = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) A_ = target_size return inputs def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = model_inputs.pop("target_size" ) A_ = self.model(**UpperCAmelCase ) A_ = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: A_ = model_inputs["bbox"] return model_outputs def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ): A_ = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A_ , A_ = target_size[0].tolist() def unnormalize(UpperCAmelCase : Any ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )] A_ = ["score", "label", "box"] A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = raw_annotations[0] A_ = raw_annotation["scores"] A_ = raw_annotation["labels"] A_ = raw_annotation["boxes"] A_ = scores.tolist() A_ = [self.model.config.idalabel[label.item()] for label in labels] A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A_ = ["score", "label", "box"] A_ = [ dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
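# Added usage sketch for the pipeline above. The DETR checkpoint name is an
# assumption; any object-detection checkpoint with a compatible head works.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])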
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : List[str] = StableDiffusionInpaintPipeline UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase__ : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase__ : Optional[int] = frozenset([] ) def __lowercase ( self ) -> Tuple: torch.manual_seed(0 ) _a : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_a , ) _a : List[Any] = PNDMScheduler(skip_prk_steps=_a ) torch.manual_seed(0 ) _a : Tuple = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _a : str = CLIPTextModel(_a ) _a : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _a : Dict = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowercase ( self , _a , _a=0 ) -> List[Any]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched _a : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_a ) ).to(_a ) _a : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] _a : Tuple = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((6_4, 6_4) ) _a : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) ) if str(_a ).startswith('''mps''' ): _a : Optional[int] = torch.manual_seed(_a ) else: _a : Dict = torch.Generator(device=_a ).manual_seed(_a ) _a : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowercase ( self ) -> List[str]: _a : int = '''cpu''' # ensure determinism for the device-dependent 
torch.Generator _a : List[str] = self.get_dummy_components() _a : List[str] = StableDiffusionInpaintPipeline(**_a ) _a : Tuple = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) _a : Union[str, Any] = self.get_dummy_inputs(_a ) _a : List[str] = sd_pipe(**_a ).images _a : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) _a : str = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase ( self ) -> Any: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self ) -> List[Any]: _a : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) _a : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) _a : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) _a : Optional[int] = '''stabilityai/stable-diffusion-2-inpainting''' _a : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() _a : int = '''Face of a yellow cat, high resolution, sitting on a park bench''' _a : List[Any] = torch.manual_seed(0 ) _a : Any = pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , ) _a : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def __lowercase ( self ) -> Dict: _a : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) _a : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) _a : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) _a : Tuple = '''stabilityai/stable-diffusion-2-inpainting''' _a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained( _a , torch_dtype=torch.floataa , safety_checker=_a , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() _a : List[Any] = '''Face of a yellow cat, high resolution, sitting on a park bench''' _a : Dict = torch.manual_seed(0 ) _a : Any = pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , ) _a : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def __lowercase ( self ) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _a : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) _a : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) _a : List[Any] = 
'''stabilityai/stable-diffusion-2-inpainting''' _a : Optional[int] = PNDMScheduler.from_pretrained(_a , subfolder='''scheduler''' ) _a : Tuple = StableDiffusionInpaintPipeline.from_pretrained( _a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _a : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench''' _a : Tuple = torch.manual_seed(0 ) _a : Dict = pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type='''np''' , ) _a : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
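# Added usage sketch matching the slow tests above; it downloads weights and
# assumes a CUDA device is available.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]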
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
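# A minimal usage sketch for the pipeline above, assuming the upstream diffusers
# export name `LDMSuperResolutionPipeline` and the public checkpoint
# "CompVis/ldm-super-resolution-4x-openimages"; the input file name is a placeholder.
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages").to(device)

low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))  # placeholder input image
upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("ldm_upscaled.png")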
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed A : int = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), } def UpperCamelCase ( __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def UpperCamelCase ( __magic_name__ : int , __magic_name__ : str ) -> Optional[int]: """simple docstring""" if args.student_type == "roberta": lowercase__ = False elif args.student_type == "gpt2": lowercase__ = False def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Tuple: """simple docstring""" if args.student_type == "roberta": lowercase__ = False def UpperCamelCase ( ) -> str: """simple docstring""" lowercase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__magic_name__ , required=__magic_name__ , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__magic_name__ , required=__magic_name__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__magic_name__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__magic_name__ , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__magic_name__ , required=__magic_name__ , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__magic_name__ , type=__magic_name__ , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__magic_name__ , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__magic_name__ , required=__magic_name__ , help="""The
teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__magic_name__ , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__magic_name__ , help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__magic_name__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__magic_name__ , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__magic_name__ , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__magic_name__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.1_5 , type=__magic_name__ , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__magic_name__ , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__magic_name__ , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__magic_name__ , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__magic_name__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__magic_name__ , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__magic_name__ , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__magic_name__ , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__magic_name__ , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.0_5 , type=__magic_name__ , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__magic_name__ , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=__magic_name__ , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__magic_name__ , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__magic_name__ , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.0_2 , type=__magic_name__ , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__magic_name__ , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """ See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__magic_name__ , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__magic_name__ , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__magic_name__ , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__magic_name__ , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__magic_name__ , default=4000 , help="""Checkpoint interval.""" ) lowercase__ = parser.parse_args() sanity_checks(__magic_name__ ) # ARGS # init_gpu_params(__magic_name__ ) set_seed(__magic_name__ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite it.''' """ Use `--force` if you want to overwrite it.""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__magic_name__ ) , __magic_name__ , indent=4 ) git_log(args.dump_path ) lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[args.student_type] lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowercase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowercase__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowercase__ = tokenizer.all_special_tokens.index(__magic_name__ ) lowercase__ = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) lowercase__ = special_tok_ids lowercase__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , """rb""" ) as fp: lowercase__ = pickle.load(__magic_name__ ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts ,
"""rb""" ) as fp: lowercase__ = pickle.load(__magic_name__ ) lowercase__ = np.maximum(__magic_name__ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowercase__ = 0.0 # do not predict special tokens lowercase__ = torch.from_numpy(__magic_name__ ) else: lowercase__ = None lowercase__ = LmSeqsDataset(params=__magic_name__ , data=__magic_name__ ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) lowercase__ = student_config_class.from_pretrained(args.student_config ) lowercase__ = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) lowercase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__magic_name__ ) else: lowercase__ = student_model_class(__magic_name__ ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info("""Student loaded.""" ) # TEACHER # lowercase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__magic_name__ ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__magic_name__ , __magic_name__ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__magic_name__ , __magic_name__ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowercase__ = Distiller( params=__magic_name__ , dataset=__magic_name__ , token_probs=__magic_name__ , student=__magic_name__ , teacher=__magic_name__ ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    """
    Returns the sum of the squares of the digits of ``number``.

    >>> next_number(44)
    32
    >>> next_number(10)
    1
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True  # the chain of 1 terminates at 1
CHAINS[57] = False  # the chain of 58 terminates at 89


def chain(number: int) -> bool:
    """
    Returns True if the chain of ``number`` terminates at 1, False if it
    terminates at 89.

    >>> chain(10)
    True
    >>> chain(58)
    False
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 1000_0000) -> int:
    """
    Counts how many starting numbers below ``number`` arrive at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
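# Tracing next_number() from the solution above on the worked example in the
# Project Euler 92 statement: 44 -> 32 -> 13 -> 10 -> 1, i.e. 44 belongs to the
# family of chains that terminates at 1. Assumes the definitions above are in scope.
seen = [44]
while seen[-1] not in (1, 89):
    seen.append(next_number(seen[-1]))
print(seen)  # [44, 32, 13, 10, 1]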
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=13 , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Any=10 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : List[str]="divided_space_time" , __lowerCamelCase : List[Any]=None , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_frames SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = attention_type SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scope SCREAMING_SNAKE_CASE = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE = (num_frames) * self.num_patches_per_frame + 1 def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) 
SCREAMING_SNAKE_CASE = self.num_labels return config def _snake_case ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = TimesformerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = TimesformerForVideoClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) # verify the logits shape SCREAMING_SNAKE_CASE = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowerCamelCase ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCamelCase__ = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = TimesformerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : str=False ): SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def _snake_case ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def _snake_case ( self : Optional[Any] ): pass def _snake_case ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowerCamelCase ) @slow def _snake_case ( self : str ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = TimesformerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _snake_case ( self : List[Any] ): if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = self.model_tester.seq_length SCREAMING_SNAKE_CASE = self.model_tester.num_frames SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(out_len + 1 , len(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def _snake_case ( self : Any ): def check_hidden_states_output(__lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) SCREAMING_SNAKE_CASE = np.load(A__ ) return list(A__ ) @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Union[str, Any] ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_video() SCREAMING_SNAKE_CASE = image_processor(video[:8] , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :List[Any] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
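# With the lazy module structure above, importing the package stays cheap: the
# torch and tensorflow Tapas submodules are only imported the first time one of
# their symbols is accessed. "google/tapas-base" is a public checkpoint name.
from transformers import TapasConfig, TapasTokenizer  # triggers the lazy import

config = TapasConfig()  # default hyperparameters, no download involved
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base")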
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : Optional[Any] ): __A : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) __A : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house __A : List[str] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim __A : int = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __A : Dict = model(__A )["""last_hidden_state"""].detach() self.assertEqual(output.shape , __A ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __A , atol=1e-3 ) ) @slow def lowerCAmelCase_ ( self : List[str] ): __A : Any = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) __A : int = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house __A : Optional[Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim __A : Dict = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __A : Dict = model(__A )["""last_hidden_state"""].detach() self.assertEqual(output.shape , __A ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __A , atol=1e-3 ) )
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __a :List[Any] = get_logger() __a :Optional[dict] = None class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): """simple docstring""" def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ): super().__init__(features=UpperCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) A_ = str(jax.devices()[0] ) A_ = jnp_array_kwargs @staticmethod def __A ( ): import jax return {str(device ): device for device in jax.devices()} def __A ( self : Optional[int] , UpperCAmelCase : int ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , UpperCAmelCase ) and column: if all( isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase , axis=0 ) return column def __A ( self : List[str] , UpperCAmelCase : str ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(None )) ): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ = {} if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_x64: A_ = {"dtype": jnp.int64} else: A_ = {"dtype": jnp.int32} elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ = {"dtype": jnp.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = np.asarray(UpperCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def __A ( self : Any , UpperCAmelCase : Dict ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ): A_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=False ) def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase ) A_ = self.python_features_decoder.decode_row(UpperCAmelCase ) return self.recursive_tensorize(UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase ) A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] ) A_ = self.recursive_tensorize(UpperCAmelCase ) A_ = self._consolidate(UpperCAmelCase ) return column def __A ( self : Dict , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase ) A_ = self.python_features_decoder.decode_batch(UpperCAmelCase ) A_ = self.recursive_tensorize(UpperCAmelCase ) for column_name in batch: A_ = self._consolidate(batch[column_name] ) return batch
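# A short usage sketch: this formatter is what `datasets` dispatches to when a
# dataset is placed in "jax" format. The device argument below mirrors the
# string-identifier handling in __init__ above.
import jax
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
ds = ds.with_format("jax", device=str(jax.devices()[0]))
print(type(ds[0]["x"]))  # a jax.Array placed on the requested device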
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = "swinv2" __lowerCamelCase : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = len(_lowerCAmelCase ) _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) ) _lowerCAmelCase = (0, 0, 0, 0)
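# A minimal sketch instantiating the configuration above with its defaults,
# assuming the upstream transformers export names Swinv2Config and Swinv2Model.
# The derived hidden_size is embed_dim * 2 ** (len(depths) - 1), as computed in __init__.
from transformers import Swinv2Config, Swinv2Model

config = Swinv2Config()      # embed_dim=96, depths=[2, 2, 6, 2]
model = Swinv2Model(config)  # randomly initialised weights
print(config.hidden_size)    # 96 * 2**3 = 768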
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __a :Any = logging.getLogger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ): super().__init__( UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , ) A_ = None def __A ( self : Dict , UpperCAmelCase : int ): logger.info("initializing retrieval" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized" ) # needs to be set manually A_ = self._infer_socket_ifname() # avoid clash with the NCCL port A_ = str(distributed_port + 1 ) A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main" ) self.index.init_index() # all processes wait until the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __A ( self : List[str] ): return dist.get_rank(group=self.process_group ) == 0 def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.float32 ): A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase ) dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group ) return target_tensor def __A ( self : Any ): A_ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase ) return ifname def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ): # single GPU training if not dist.is_initialized(): A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase ) # distributed training A_ = dist.get_world_size(group=self.process_group ) # gather logic A_ = None if self._is_main(): A_ = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(UpperCAmelCase )] dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group ) # scatter logic A_ = question_hidden_states.shape[0] A_ = [] A_ = [] if self._is_main(): assert len(UpperCAmelCase ) == world_size A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase ) A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.int64 ) A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase )
"""simple docstring""" from timeit import timeit _a = { """MALAYALAM""": True, """String""": False, """rotor""": True, """level""": True, """A""": True, """BB""": True, """ABC""": False, """amanaplanacanalpanama""": True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(__snake_case ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" _UpperCamelCase = len(__snake_case ) // 2 _UpperCamelCase = len(__snake_case ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(__snake_case ) ) def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" if len(__snake_case ) <= 2: return True if s[0] == s[len(__snake_case ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" return s == s[::-1] def lowerCamelCase__ ( __snake_case ) -> None: """simple docstring""" _UpperCamelCase = F'''all({name}(key) is value for key, value in test_data.items())''' _UpperCamelCase = F'''from __main__ import test_data, {name}''' _UpperCamelCase = 50_00_00 _UpperCamelCase = timeit(stmt=__snake_case, setup=__snake_case, number=__snake_case ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(F"""{key:21} {value}""") print("""a man a plan a canal panama""") # finished 500,000 runs in 0.46793 seconds benchmark_function("""is_palindrome_slice""") # finished 500,000 runs in 0.85234 seconds benchmark_function("""is_palindrome""") # finished 500,000 runs in 1.32028 seconds benchmark_function("""is_palindrome_recursive""") # finished 500,000 runs in 2.08679 seconds benchmark_function("""is_palindrome_traversal""")
from jiwer import compute_measures import datasets __a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' __a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def __A ( self : Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ): if concatenate_texts: return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"] else: A_ = 0 A_ = 0 for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ): A_ = compute_measures(UpperCAmelCase , UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + 
measures["hits"] return incorrect / total
def is_arithmetic_series(series: list) -> bool:
    """
    Checks whether ``series`` is an arithmetic series, i.e. has a constant
    difference between consecutive terms.

    >>> is_arithmetic_series([2, 4, 6])
    True
    >>> is_arithmetic_series([3, 6, 12, 24])
    False
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """
    Returns the arithmetic mean of ``series``.

    >>> arithmetic_mean([2, 4, 6])
    4.0
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
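# Quick checks against the helpers above (the function names are editorial
# reconstructions of the obfuscated originals; see the doctests for expectations):
print(is_arithmetic_series([2, 4, 6]))       # True  (constant difference 2)
print(is_arithmetic_series([3, 6, 12, 24]))  # False (the difference doubles)
print(arithmetic_mean([2, 4, 6]))            # 4.0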
class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ): A_ = None A_ = None A_ = graph self._normalize_graph(UpperCAmelCase , UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = None def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ): if sources is int: A_ = [sources] if sinks is int: A_ = [sinks] if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0: return A_ = sources[0] A_ = sinks[0] # make fake vertex if there are more # than one source or sink if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1: A_ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A_ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A_ = max_input_flow A_ = 0 A_ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A_ = max_input_flow A_ = size - 1 def __A ( self : str ): if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __A ( self : Tuple , UpperCAmelCase : List[Any] ): A_ = algorithm(self ) class _a : """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : List[str] ): A_ = flow_network A_ = flow_network.verticesCount A_ = flow_network.sourceIndex A_ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A_ = flow_network.graph A_ = False def __A ( self : Optional[int] ): if not self.executed: self._algorithm() A_ = True def __A ( self : Dict ): pass class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ): super().__init__(UpperCAmelCase ) # use this to save your result A_ = -1 def __A ( self : Tuple ): if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ): super().__init__(UpperCAmelCase ) A_ = [[0] * self.verticies_count for i in range(self.verticies_count )] A_ = [0] * self.verticies_count A_ = [0] * self.verticies_count def __A ( self : List[str] ): A_ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A_ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A_ = 0 while i < len(UpperCAmelCase ): A_ = vertices_list[i] A_ = self.heights[vertex_index] self.process_vertex(UpperCAmelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) ) A_ = 0 else: i += 1 A_ = sum(self.preflow[self.source_index] ) def __A ( self : List[str] , UpperCAmelCase : Dict ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(UpperCAmelCase , UpperCAmelCase ) self.relabel(UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ): A_ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A_ = self.heights[to_index] if min_height is not None: A_ = min_height + 1 if __name__ == "__main__": __a :Tuple = [0] __a :Tuple = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __a :List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __a :List[Any] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
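# Sanity check for the demo graph above: the only augmenting path is
# 0 -> 1 -> 2 -> 3 with edge capacities 7, 6 and 8, so the push-relabel result
# should equal the bottleneck capacity.
expected_maximum_flow = min(7, 6, 8)  # = 6
print(expected_maximum_flow)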
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merges two adjacent sorted slices of ``input_list`` in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Iterative (bottom-up) merge sort.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
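# A deterministic example to complement the interactive prompt above; assumes
# the definitions from the sort module are in scope.
print(iter_merge_sort([9, 1, 4, 2]))  # [1, 2, 4, 9]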
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :str = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[Any] = logging.get_logger(__name__) _snake_case : str = { 'tanreinama/GPTSAN-2.8B-spout_is_uniform': ( 'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json' ), } class A ( _a ): lowercase_ = 'gptsan-japanese' lowercase_ = [ 'past_key_values', ] lowercase_ = { 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Union[str, Any] , lowerCAmelCase_ : Dict=3_60_00 , lowerCAmelCase_ : Union[str, Any]=12_80 , lowerCAmelCase_ : Optional[int]=10_24 , lowerCAmelCase_ : Union[str, Any]=81_92 , lowerCAmelCase_ : int=40_96 , lowerCAmelCase_ : Dict=1_28 , lowerCAmelCase_ : Optional[int]=10 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : str=1_28 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : int=1e-5 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Union[str, Any]="float32" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[str]=0.0_0_2 , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=3_59_98 , lowerCAmelCase_ : Tuple=3_59_95 , lowerCAmelCase_ : Optional[Any]=3_59_99 , **lowerCAmelCase_ : Any , ) -> List[str]: """simple docstring""" _a = vocab_size _a = max_position_embeddings _a = d_model _a = d_ff _a = d_ext _a = d_spout _a = num_switch_layers _a = num_ext_layers _a = num_switch_layers + num_ext_layers _a = num_heads _a = num_experts _a = expert_capacity _a = dropout_rate _a = layer_norm_epsilon _a = router_bias _a = router_jitter_noise _a = router_dtype _a = router_ignore_padding_tokens _a = output_hidden_states _a = output_attentions _a = initializer_factor _a = output_router_logits _a = use_cache super().__init__( separator_token_id=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
# code_codestyle: 22
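# A short sketch of how the config above behaves (assuming a `transformers`
# release that ships `GPTSanJapaneseConfig`): the `attribute_map` entries make
# the generic attribute names resolve to the GPTSAN-specific ones.
from transformers import GPTSanJapaneseConfig

cfg = GPTSanJapaneseConfig(d_model=1024, num_switch_layers=10, num_ext_layers=0)
assert cfg.hidden_size == cfg.d_model  # alias declared in attribute_map
assert cfg.num_layers == cfg.num_switch_layers + cfg.num_ext_layers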
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation.

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)

### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```

## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
# style_context_codestyle: 86
# label: 0
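# Usage sketch for the card generator above: writing one card into a temporary
# directory instead of the repository's model_cards/ tree (the target directory
# is hypothetical; `write_model_card` creates it if needed).
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    write_model_card(Path(tmp) / "facebook" / "wmt19-ru-en", src_lang="ru", tgt_lang="en")
    print((Path(tmp) / "facebook" / "wmt19-ru-en" / "README.md").read_text()[:80])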
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _snake_case (__lowercase): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _a ( nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: super().__init__() UpperCamelCase_ = module UpperCamelCase_ = nn.Sequential( nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , ) UpperCamelCase_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCAmelCase ( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _a ( unittest.TestCase ): """simple docstring""" A_ = """bigscience/bloom-1b7""" # Constant values A_ = 2.109_659_552_692_574 A_ = """Hello my name is""" A_ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) A_ = 10 def _UpperCAmelCase ( self ) -> List[Any]: # Models and tokenizer UpperCamelCase_ = AutoTokenizer.from_pretrained(self.model_name ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> List[Any]: super().setUp() # Models and tokenizer UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def _UpperCAmelCase ( self ) -> Dict: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ = self.model_abit.config self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) ) UpperCamelCase_ = config.to_dict() UpperCamelCase_ = config.to_diff_dict() UpperCamelCase_ = config.to_json_string() def _UpperCAmelCase ( self ) -> int: from bitsandbytes.nn import Paramsabit UpperCamelCase_ = self.model_fpaa.get_memory_footprint() UpperCamelCase_ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) UpperCamelCase_ = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCAmelCase ( self ) -> Any: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + 
TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) UpperCamelCase_ = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ = BitsAndBytesConfig() UpperCamelCase_ = True UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) UpperCamelCase_ = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def _UpperCAmelCase ( self ) -> int: with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = BitsAndBytesConfig() with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def _UpperCAmelCase ( self ) -> Optional[Any]: with self.assertRaises(_UpperCAmelCase ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) UpperCamelCase_ = self.model_fpaa.to(torch.floataa ) UpperCamelCase_ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error UpperCamelCase_ = self.model_fpaa.to('cpu' ) # Check this does not throw an error UpperCamelCase_ = self.model_fpaa.half() # Check this does not throw an error UpperCamelCase_ = self.model_fpaa.float() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _a ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCAmelCase ( cls ) -> Tuple: UpperCamelCase_ = 't5-small' UpperCamelCase_ = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense UpperCamelCase_ = AutoTokenizer.from_pretrained(cls.model_name ) UpperCamelCase_ = 'Translate in German: Hello, my dog is cute' def _UpperCAmelCase ( self ) -> List[Any]: gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Any: from transformers import TaForConditionalGeneration UpperCamelCase_ = TaForConditionalGeneration._keep_in_fpaa_modules UpperCamelCase_ = None # test with `t5-small` UpperCamelCase_ = 
TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` UpperCamelCase_ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) UpperCamelCase_ = modules def _UpperCAmelCase ( self ) -> str: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` UpperCamelCase_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` UpperCamelCase_ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> Union[str, Any]: super().setUp() # model_name UpperCamelCase_ = 'bigscience/bloom-560m' UpperCamelCase_ = 't5-small' # Different types of model UpperCamelCase_ = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Sequence classification model UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # CausalLM model UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Seq2seq model UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Union[str, Any]: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> Any: super().setUp() def _UpperCAmelCase ( self ) -> Optional[Any]: del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Any: UpperCamelCase_ = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass UpperCamelCase_ = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _a ( UpperCAmelCase__ ): """simple 
docstring""" def _UpperCAmelCase ( self ) -> Dict: super().setUp() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch UpperCamelCase_ = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = 'facebook/opt-350m' super().setUp() def _UpperCAmelCase ( self ) -> Optional[Any]: if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): UpperCamelCase_ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability UpperCamelCase_ = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCAmelCase ) ): UpperCamelCase_ = LoRALayer(module.q_proj , rank=16 ) UpperCamelCase_ = LoRALayer(module.k_proj , rank=16 ) UpperCamelCase_ = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch UpperCamelCase_ = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): UpperCamelCase_ = model.forward(**_UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = """gpt2-xl""" A_ = 3.3_191_854_854_152_187
# code_codestyle: 23
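# The tests above exercise 4-bit loading through `bitsandbytes`. A minimal
# sketch of the pattern under test (needs a CUDA GPU plus the `bitsandbytes`
# and `accelerate` packages; "bigscience/bloom-1b7" is the checkpoint the
# tests use):
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
)
inputs = tok("Hello my name is", return_tensors="pt").to(0)
out = model.generate(**inputs, max_new_tokens=10)
print(tok.decode(out[0], skip_special_tokens=True))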
from ..utils import DummyObject, requires_backends


# Six dummy placeholder classes; the original class names are not recoverable
# from this dump, so the `_a` placeholders are kept.
class _a(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _a(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _a(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _a(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _a(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _a(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
# style_context_codestyle: 86
# label: 0
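# Sketch of what the dummy classes above do at runtime (the class name here is
# hypothetical, for illustration): when any listed backend is missing,
# `requires_backends` raises an ImportError with install instructions as soon
# as the object is constructed.
from diffusers.utils import DummyObject, requires_backends

class _ExampleOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

try:
    _ExampleOnnxPipeline()
except ImportError as err:
    print(err)  # names the missing backend(s) and how to install them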
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
# code_codestyle: 24
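# The benchmark above depends on a `get_duration` decorator from its local
# utils module. A self-contained stand-in with the same shape (hypothetical,
# for illustration only):
import time

def get_duration(func):
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds instead of the return value
    return wrapper

@get_duration
def busy_loop(n):
    for _ in range(n):
        pass

print(f"busy_loop took {busy_loop(1_000_000):.4f}s")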
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
# style_context_codestyle: 86
# label: 0
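# Sketch of the scheduler under test (requires `diffusers`): the parallel
# variant behaves like DDPMScheduler for single steps and additionally exposes
# `batch_step_no_noise` for denoising several timesteps in one call.
import torch
from diffusers import DDPMParallelScheduler

sched = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
sched.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)      # stand-in for a noisy latent
noise_pred = torch.randn(1, 3, 8, 8)  # stand-in for a model's noise prediction
prev = sched.step(noise_pred, int(sched.timesteps[0]), sample).prev_sample
print(prev.shape)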
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class _UpperCamelCase : '''simple docstring''' def __init__( self : List[str] , a : Optional[int] , a : str=99 , a : str=13 , a : List[Any]=7 , a : Optional[int]=9 , a : Optional[int]=True , a : Union[str, Any]=True , a : Any=False , a : Tuple=32 , a : List[str]=5 , a : Union[str, Any]=4 , a : Union[str, Any]=37 , a : str=8 , a : int=0.1 , a : Optional[int]=0.002 , a : Union[str, Any]=1 , a : Optional[int]=0 , a : Tuple=0 , a : Any=None , a : Optional[Any]=None , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Optional[int] = encoder_seq_length SCREAMING_SNAKE_CASE : Tuple = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE : int = self.decoder_seq_length SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Any = use_attention_mask SCREAMING_SNAKE_CASE : List[str] = use_labels SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = d_ff SCREAMING_SNAKE_CASE : Tuple = relative_attention_num_buckets SCREAMING_SNAKE_CASE : List[Any] = dropout_rate SCREAMING_SNAKE_CASE : int = initializer_factor SCREAMING_SNAKE_CASE : List[Any] = eos_token_id SCREAMING_SNAKE_CASE : int = pad_token_id SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_start_token_id SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Tuple = decoder_layers def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return TaConfig.from_pretrained("google/umt5-base" ) def __UpperCamelCase ( self : Optional[int] , a : List[str] , a : Optional[Any] , a : Optional[int] , a : Tuple=None , a : List[Any]=None , a : int=None , a : Any=None , a : Dict=None , ) -> List[str]: """simple docstring""" if attention_mask is None: SCREAMING_SNAKE_CASE : int = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE : Any = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: SCREAMING_SNAKE_CASE : Tuple = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=a ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=a ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Any 
= ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input SCREAMING_SNAKE_CASE : int = input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE : Any = self.get_config() SCREAMING_SNAKE_CASE : Union[str, Any] = config.num_attention_heads SCREAMING_SNAKE_CASE : Dict = self.prepare_inputs_dict(a , a , a ) return config, input_dict def __UpperCamelCase ( self : Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def __UpperCamelCase ( self : int ) -> List[Any]: """simple docstring""" return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCamelCase ( self : Dict , a : List[str] , a : Dict , a : int , a : Tuple , a : List[str] , a : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = UMTaModel(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model( input_ids=a , decoder_input_ids=a , attention_mask=a , decoder_attention_mask=a , ) SCREAMING_SNAKE_CASE : Any = model(input_ids=a , decoder_input_ids=a ) SCREAMING_SNAKE_CASE : Optional[Any] = result.last_hidden_state SCREAMING_SNAKE_CASE : List[str] = result.past_key_values SCREAMING_SNAKE_CASE : List[str] = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(a ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __UpperCamelCase ( self 
: List[str] , a : Optional[int] , a : Tuple , a : Tuple , a : List[Any] , a : List[Any] , a : Optional[int] , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = UMTaModel(config=a ).get_decoder().to(a ).eval() # first forward pass SCREAMING_SNAKE_CASE : int = model(a , use_cache=a ) SCREAMING_SNAKE_CASE : Dict = model(a ) SCREAMING_SNAKE_CASE : str = model(a , use_cache=a ) self.parent.assertTrue(len(a ) == len(a ) ) self.parent.assertTrue(len(a ) == len(a ) + 1 ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and SCREAMING_SNAKE_CASE : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE : str = model(a )["last_hidden_state"] SCREAMING_SNAKE_CASE : List[str] = model(a , past_key_values=a )["last_hidden_state"] # select random slice SCREAMING_SNAKE_CASE : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach() SCREAMING_SNAKE_CASE : Any = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) ) def __UpperCamelCase ( self : Tuple , a : Union[str, Any] , a : int , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str = UMTaModel(config=a ).to(a ).half().eval() SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a )["last_hidden_state"] self.parent.assertFalse(torch.isnan(a ).any().item() ) @require_torch class _UpperCamelCase ( __A , __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) lowerCamelCase__ =(UMTaForConditionalGeneration,) if is_torch_available() else () lowerCamelCase__ =( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =True lowerCamelCase__ =True # The small UMT5 model needs higher percentages for CPU/MP tests lowerCamelCase__ =[0.8, 0.9] def __UpperCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : int = UMTaModel(config_and_inputs[0] ).to(a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=a , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*a ) def __UpperCamelCase ( self : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : int = 
["encoder_attentions", "decoder_attentions", "cross_attentions"] SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs[0] SCREAMING_SNAKE_CASE : Any = UMTaForConditionalGeneration(a ).eval() model.to(a ) SCREAMING_SNAKE_CASE : List[Any] = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=a ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=a ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=a ), } for attn_name, (name, mask) in zip(a , head_masking.items() ): SCREAMING_SNAKE_CASE : Optional[int] = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": SCREAMING_SNAKE_CASE : List[Any] = torch.ones( config.num_decoder_layers , config.num_heads , device=a ) SCREAMING_SNAKE_CASE : Optional[Any] = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=a , return_dict_in_generate=a , **a , ) # We check the state of decoder_attentions and cross_attentions just from the last step SCREAMING_SNAKE_CASE : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def __UpperCamelCase ( self : Optional[int] ) -> int: """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def __UpperCamelCase ( self : List[str] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : str = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=a ).to(a ) SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=a , legacy=a ) SCREAMING_SNAKE_CASE : int = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] SCREAMING_SNAKE_CASE : Dict = tokenizer(a , return_tensors="pt" , padding=a ).input_ids # fmt: off SCREAMING_SNAKE_CASE : int = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(a , a ) SCREAMING_SNAKE_CASE : Dict = model.generate(input_ids.to(a ) ) SCREAMING_SNAKE_CASE : List[Any] = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] SCREAMING_SNAKE_CASE : Dict = tokenizer.batch_decode(a ) self.assertEqual(a , a )
# code_codestyle: 25
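# Sketch of the model family exercised by the tests above (assumes a
# `transformers` version that ships UMT5; downloads the checkpoint on first
# run):
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tok = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
ids = tok("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
print(tok.batch_decode(model.generate(ids, max_new_tokens=10)))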
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_two = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_one, entity_token_two]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
# style_context_codestyle: 86
# label: 0
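# Sketch of using a converted LUKE checkpoint with entity spans (here loaded
# from "studio-ousia/luke-base", the published Hub equivalent of the
# conversion script above):
from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")
inputs = tokenizer("Beyoncé lives in Los Angeles.", entity_spans=[(0, 7)], return_tensors="pt")
outputs = model(**inputs)
print(outputs.entity_last_hidden_state.shape)  # one encoded entity span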
'''simple docstring''' import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): __UpperCamelCase = True from torch.cuda.amp import autocast __UpperCamelCase = logging.getLogger(__name__) @dataclass class _A : lowercase__: str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowercase__: Optional[str] = field( default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) lowercase__: Optional[bool] = field( default=__lowercase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) lowercase__: Optional[bool] = field( default=__lowercase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) lowercase__: Optional[float] = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) lowercase__: Optional[float] = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) lowercase__: Optional[float] = field( default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __snake_case : int = logging.WARNING if model_args.verbose_logging: __snake_case : Any = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): __snake_case : List[str] = logging.INFO logger.setLevel(_lowerCamelCase ) @dataclass class _A : lowercase__: str = field( default=__lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) lowercase__: Optional[str] = field( default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowercase__: Optional[str] = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) lowercase__: Optional[str] = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) lowercase__: Optional[str] = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) lowercase__: bool = field( default=__lowercase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) lowercase__: Optional[int] = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) lowercase__: Optional[int] = field( default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) lowercase__: Optional[float] = field( default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class _A : lowercase__: WavaVecaForPreTraining lowercase__: WavaVecaFeatureExtractor lowercase__: Union[bool, str] = "longest" lowercase__: Optional[int] = None lowercase__: Optional[int] = None def __call__( self : Any , __magic_name__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: """simple docstring""" __snake_case : Any = self.feature_extractor.pad( __magic_name__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) __snake_case : int = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) __snake_case : Optional[Any] = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula __snake_case : Optional[int] = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) __snake_case : Any = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to __snake_case : Dict = 1 __snake_case : Optional[Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices __snake_case : int = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__magic_name__ , min_masks=2 , ) return batch class _A ( __lowercase ): def __init__( self : str , *__magic_name__ : Any , __magic_name__ : Optional[Any]=1 , __magic_name__ : int=0 , __magic_name__ : Tuple=1.0 , **__magic_name__ : Any ) -> Dict: """simple docstring""" super().__init__(*__magic_name__ , **__magic_name__ ) __snake_case : List[str] = 0 __snake_case : Optional[Any] = max_gumbel_temp __snake_case : int = min_gumbel_temp __snake_case : Union[str, Any] = gumbel_temp_decay def lowercase__ ( self : Optional[Any] , __magic_name__ : nn.Module , __magic_name__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: """simple docstring""" model.train() __snake_case : int = self._prepare_inputs(__magic_name__ ) if self.use_amp: with autocast(): __snake_case : str = self.compute_loss(__magic_name__ , __magic_name__ ) else: __snake_case : Tuple = self.compute_loss(__magic_name__ , __magic_name__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": __snake_case : Union[str, Any] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __snake_case : str = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: __snake_case : List[Any] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(__magic_name__ ).backward() elif self.use_apex: with amp.scale_loss(__magic_name__ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(__magic_name__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def _a ( ) -> Any: """simple docstring""" __snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __snake_case , __snake_case , __snake_case : List[Any] = parser.parse_args_into_dataclasses() configure_logger(_lowerCamelCase , _lowerCamelCase ) # Downloading and loading a dataset from the hub. __snake_case : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" __snake_case : int = DatasetDict() __snake_case : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) __snake_case : Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" __snake_case : Optional[Any] = DatasetDict() __snake_case : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , ) __snake_case : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported __snake_case : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase ) def prepare_dataset(_lowerCamelCase ): # check that all files have the correct sampling rate __snake_case , __snake_case : int = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays __snake_case : List[str] = datasets.map( _lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long __snake_case : Optional[Any] = vectorized_datasets.filter( lambda _lowerCamelCase : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_lowerCamelCase ): return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` __snake_case : str = vectorized_datasets.map( _lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , 
remove_columns=vectorized_datasets["""train"""].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 __snake_case : str = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) __snake_case : Tuple = WavaVecaForPreTraining(_lowerCamelCase ) __snake_case : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase ) __snake_case : List[str] = WavaVecaPreTrainer( model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
26
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __a :Optional[Any] = 'true' def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ): """simple docstring""" set_seed(42 ) A_ = RegressionModel() A_ = deepcopy(__UpperCamelCase ) A_ = RegressionDataset(length=__UpperCamelCase ) A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase ) model.to(accelerator.device ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return model, ddp_model, dataloader def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ): """simple docstring""" A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) A_ = load_dataset("glue" ,"mrpc" ,split="validation" ) def tokenize_function(__UpperCamelCase : Optional[Any] ): A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ) return outputs with accelerator.main_process_first(): A_ = dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,) A_ = tokenized_datasets.rename_column("label" ,"labels" ) def collate_fn(__UpperCamelCase : Union[str, Any] ): if use_longest: return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" ) return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" ) return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ): """simple docstring""" A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase ) A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches ) A_ = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase ) A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] for batch in dataloader: A_ , A_ = batch.values() with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ , A_ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A_ , A_ = [], [] for logit, targ in logits_and_targets: logits.append(__UpperCamelCase ) targs.append(__UpperCamelCase ) A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase ) return logits, targs def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ): """simple docstring""" A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) assert ( len(__UpperCamelCase ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}''' def __snake_case ( 
__UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ): """simple docstring""" A_ = evaluate.load("glue" ,"mrpc" ) A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase ) # First do baseline A_ , A_ , A_ = setup["no"] model.to(__UpperCamelCase ) model.eval() for batch in dataloader: batch.to(__UpperCamelCase ) with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] ) A_ = metric.compute() # Then do distributed A_ , A_ , A_ = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): A_ = model(**__UpperCamelCase ) A_ = outputs.logits.argmax(dim=-1 ) A_ = batch["labels"] A_ , A_ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase ) A_ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def __snake_case ( ): """simple docstring""" A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(__UpperCamelCase ,__UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(__UpperCamelCase ,99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) A_ = Accelerator() test_torch_metrics(__UpperCamelCase ,512 ) accelerator.state._reset_state() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
86
0
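# A minimal sketch, under the default model arguments shown above, of the
# exponential Gumbel-softmax temperature schedule that the trainer row applies
# once per update step; the function name itself is illustrative.
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    """Decay the temperature exponentially per step, clamped below at min_temp."""
    return max(max_temp * decay**step, min_temp)

for step in (0, 100_000, 1_000_000):
    print(step, gumbel_temperature(step))  # 2.0 at step 0, ~1.21 at 100k, clamped to 0.5 by 1M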
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase__ ( self ): _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) _A = 'xvjiarui/stable-diffusion-2-inpainting' _A, _A = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case_ , safety_checker=snake_case_ ) _A = 'Face of a yellow cat, high resolution, sitting on a park bench' _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = num_samples * [init_image] _A = num_samples * [mask_image] _A, _A, _A = pipeline.prepare_inputs(snake_case_ , snake_case_ , snake_case_ ) # shard inputs and rng _A = replicate(snake_case_ ) _A = jax.random.split(snake_case_ , jax.device_count() ) _A = shard(snake_case_ ) _A = shard(snake_case_ ) _A = shard(snake_case_ ) _A = pipeline( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , jit=snake_case_ ) _A = output.images.reshape(snake_case_ , 512 , 512 , 3 ) _A = images[0, 253:256, 253:256, -1] _A = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _A = jnp.array( [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
27
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __a :Optional[Any] = 'src/transformers' __a :Tuple = 'docs/source/en/tasks' def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.readlines() # Find the start prompt. A_ = 0 while not lines[start_index].startswith(__UpperCamelCase ): start_index += 1 start_index += 1 A_ = start_index while not lines[end_index].startswith(__UpperCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __a :List[str] = direct_transformers_import(TRANSFORMERS_PATH) __a :Optional[Any] = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__a :Optional[Any] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = TASK_GUIDE_TO_MODELS[task_guide] A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() ) A_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ , A_ , A_ , A_ = _find_text_in_file( filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,) A_ = get_model_list_for_task(__UpperCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' " to fix this." ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a :Optional[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
86
0
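# A minimal sketch, assuming a single-host JAX setup, of the per-device
# sharding used by the Flax pipeline row above: `shard` splits the leading
# batch axis across the host's local devices, so the global batch size must
# be divisible by `jax.local_device_count()`. The array shape is illustrative.
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

num_devices = jax.local_device_count()
batch = jnp.zeros((num_devices * 2, 8, 8, 3))  # global batch on one leading axis
sharded = shard(batch)
print(sharded.shape)  # (num_devices, 2, 8, 8, 3)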
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _a ( yaml.SafeLoader ): '''simple docstring''' def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [self.constructed_objects[key_node] for key_node, _ in node.value] SCREAMING_SNAKE_CASE : List[Any] = [tuple(A ) if isinstance(A, A ) else key for key in keys] SCREAMING_SNAKE_CASE : List[str] = Counter(A ) SCREAMING_SNAKE_CASE : Tuple = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = super().construct_mapping(A, deep=A ) self._check_no_duplicates_on_constructed_node(A ) return mapping def lowercase__( __UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: SCREAMING_SNAKE_CASE : Any = full_content[1:].index('---' ) + 1 SCREAMING_SNAKE_CASE : Tuple = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(__UpperCamelCase ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Optional[Any] = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase_ ( cls, A ): '''simple docstring''' with open(A, encoding='utf-8' ) as readme_file: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(A ) else: return cls() def UpperCamelCase_ ( self, A ): '''simple docstring''' if path.exists(): with open(A, encoding='utf-8' ) as readme_file: SCREAMING_SNAKE_CASE : Optional[Any] = readme_file.read() else: SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : List[str] = self._to_readme(A ) with open(A, 'w', encoding='utf-8' ) as readme_file: readme_file.write(A ) def UpperCamelCase_ ( self, A = None ): '''simple docstring''' if readme_content is not None: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _split_yaml_from_readme(A ) SCREAMING_SNAKE_CASE : Optional[int] = '---\n' + self.to_yaml_string() + '---\n' + content else: SCREAMING_SNAKE_CASE : Any = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def UpperCamelCase_ ( cls, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = yaml.load(A, Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields SCREAMING_SNAKE_CASE : Union[str, Any] = { (key.replace('-', '_' ) if key.replace('-', '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A ) def UpperCamelCase_ ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_', '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() }, sort_keys=A, allow_unicode=A, encoding='utf-8', ).decode('utf-8' ) UpperCamelCase_ = { "image-classification": [], "translation": [], "image-segmentation": [], "fill-mask": [], "automatic-speech-recognition": [], "token-classification": [], "sentence-similarity": [], "audio-classification": [], "question-answering": [], "summarization": [], "zero-shot-classification": [], "table-to-text": [], "feature-extraction": [], "other": [], "multiple-choice": [], "text-classification": [], "text-to-image": [], 
"text2text-generation": [], "zero-shot-image-classification": [], "tabular-classification": [], "tabular-regression": [], "image-to-image": [], "tabular-to-text": [], "unconditional-image-generation": [], "text-retrieval": [], "text-to-speech": [], "object-detection": [], "audio-to-audio": [], "text-generation": [], "conversational": [], "table-question-answering": [], "visual-question-answering": [], "image-to-text": [], "reinforcement-learning": [], "voice-activity-detection": [], "time-series-forecasting": [], "document-question-answering": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCamelCase_ = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.") ap.add_argument("readme_filepath") UpperCamelCase_ = ap.parse_args() UpperCamelCase_ = Path(args.readme_filepath) UpperCamelCase_ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
28
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __a :Dict = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A_ = "" else: A_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[ : config.hidden_size, : ] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[ -config.hidden_size :, : ] A_ = in_proj_bias[-config.hidden_size :] def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = dct.pop(__UpperCamelCase ) A_ = val def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = ViTConfig() A_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A_ = True A_ = int(vit_name[-12:-10] ) A_ = int(vit_name[-9:-6] ) else: A_ = 1000 A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} A_ = int(vit_name[-6:-4] ) A_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("tiny" ): A_ = 192 A_ = 768 A_ = 12 A_ = 3 elif vit_name[9:].startswith("small" ): A_ = 384 A_ = 1536 A_ = 12 A_ = 6 else: pass else: if vit_name[4:].startswith("small" ): A_ = 768 A_ = 2304 A_ = 8 A_ = 8 elif vit_name[4:].startswith("base" ): pass elif vit_name[4:].startswith("large" ): A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 elif vit_name[4:].startswith("huge" ): A_ = 1280 A_ = 5120 A_ = 32 A_ = 16 # load original model from timm A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ = timm_model.state_dict() if base_model: remove_classification_head_(__UpperCamelCase ) A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": A_ = ViTModel(__UpperCamelCase ).eval() else: A_ = ViTForImageClassification(__UpperCamelCase ).eval() model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A_ = DeiTImageProcessor(size=config.image_size ) else: A_ = ViTImageProcessor(size=config.image_size ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ) A_ = encoding["pixel_values"] A_ = model(__UpperCamelCase ) if base_model: A_ = timm_model.forward_features(__UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 ) else: A_ 
= timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __a :Optional[int] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
86
0
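# A minimal sketch of the front-matter split performed by
# `_split_yaml_from_readme` in the metadata row above: a README whose first
# line is `---` has its YAML block separated from the Markdown body;
# otherwise the whole file is treated as body.
def split_yaml_from_readme(readme_content: str):
    lines = readme_content.splitlines()
    if lines and lines[0] == "---" and "---" in lines[1:]:
        sep_idx = lines[1:].index("---") + 1
        return "\n".join(lines[1:sep_idx]), "\n".join(lines[sep_idx + 1 :])
    return None, "\n".join(lines)

yaml_block, body = split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
print(yaml_block)  # license: mit
print(body)        # # My dataset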
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ): lowerCamelCase_ = '''backbone.''' if is_semantic else '''''' lowerCamelCase_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ (f"{prefix}cls_token", '''beit.embeddings.cls_token'''), (f"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''), (f"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''), (f"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ): for i in range(config.num_hidden_layers ): lowerCamelCase_ = '''backbone.''' if is_semantic else '''''' # queries, keys and values lowerCamelCase_ = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight" ) lowerCamelCase_ = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias" ) lowerCamelCase_ = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias" ) lowerCamelCase_ = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase_ = q_bias lowerCamelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase_ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase_ = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowerCamelCase_ = 
state_dict.pop(f"{prefix}blocks.{i}.gamma_1" ) lowerCamelCase_ = state_dict.pop(f"{prefix}blocks.{i}.gamma_2" ) lowerCamelCase_ = gamma_a lowerCamelCase_ = gamma_a def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = dct.pop(lowerCAmelCase__ ) lowerCamelCase_ = val def lowercase ( ): lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw ) return im @torch.no_grad() def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ): lowerCamelCase_ = False if '''rvlcdip''' in checkpoint_url else True lowerCamelCase_ = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase__ ,use_mask_token=lowerCAmelCase__ ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowerCamelCase_ = 1_024 lowerCamelCase_ = 4_096 lowerCamelCase_ = 24 lowerCamelCase_ = 16 # labels if "rvlcdip" in checkpoint_url: lowerCamelCase_ = 16 lowerCamelCase_ = '''huggingface/label-files''' lowerCamelCase_ = '''rvlcdip-id2label.json''' lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) ) lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} lowerCamelCase_ = idalabel lowerCamelCase_ = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model'''] lowerCamelCase_ = create_rename_keys(lowerCAmelCase__ ,has_lm_head=lowerCAmelCase__ ) for src, dest in rename_keys: rename_key(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) read_in_q_k_v(lowerCAmelCase__ ,lowerCAmelCase__ ,has_lm_head=lowerCAmelCase__ ) # load HuggingFace model lowerCamelCase_ = BeitForMaskedImageModeling(lowerCAmelCase__ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase__ ) model.eval() model.load_state_dict(lowerCAmelCase__ ) # Check outputs on an image lowerCamelCase_ = BeitImageProcessor( size=config.image_size ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ) lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ) lowerCamelCase_ = encoding['''pixel_values'''] lowerCamelCase_ = model(lowerCAmelCase__ ) lowerCamelCase_ = outputs.logits # verify logits lowerCamelCase_ = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192] assert logits.shape == torch.Size(lowerCAmelCase__ ), "Shape of logits not as expected" Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(lowerCAmelCase__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: if has_lm_head: lowerCamelCase_ = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: lowerCamelCase_ = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(lowerCAmelCase__ ,lowerCAmelCase__ ) ,organization='''nielsr''' ,commit_message='''Add image processor''' ,use_temp_dir=lowerCAmelCase__ ,) model.push_to_hub( repo_path_or_name=Path(lowerCAmelCase__ ,lowerCAmelCase__ ) ,organization='''nielsr''' ,commit_message='''Add model''' ,use_temp_dir=lowerCAmelCase__ ,) if __name__ == "__main__": A_ = 
argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) A_ = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
29
def __snake_case ( __UpperCamelCase : int = 50 ): """simple docstring""" A_ = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 ,5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(F"{solution() = }")
86
0
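# A quick sanity check of the tiling recurrence in the row above: `ways[n]`
# counts fillings of a length-n row with unit squares plus tiles of length
# 2-4, decomposing each filling by the position of its leftmost long tile.
# The expected small values below were enumerated by hand.
def count_ways(length: int) -> int:
    ways = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways[row_length] += ways[row_length - tile_start - tile_length]
    return ways[length]

assert count_ways(2) == 2  # two unit squares, or one 2-tile
assert count_ways(3) == 4  # UUU, 2+U, U+2, 3
assert count_ways(4) == 8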
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = '▁' __a = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __a = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __a = {'vinai/bartpho-syllable': 1_024} class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="<unk>" ,_SCREAMING_SNAKE_CASE="<pad>" ,_SCREAMING_SNAKE_CASE="<mask>" ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : List[Any] = AddedToken(_SCREAMING_SNAKE_CASE ,lstrip=_SCREAMING_SNAKE_CASE ,rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else mask_token UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,sp_model_kwargs=self.sp_model_kwargs ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : str = vocab_file UpperCAmelCase_ : Any = monolingual_vocab_file UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility UpperCAmelCase_ : str = {} UpperCAmelCase_ : Any = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: UpperCAmelCase_ : str = cnt cnt += 1 with open(_SCREAMING_SNAKE_CASE ,'''r''' ,encoding='''utf-8''' ) as f: for line in f.readlines(): UpperCAmelCase_ : int = line.strip().split()[0] UpperCAmelCase_ : Tuple = len(self.fairseq_tokens_to_ids ) if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: UpperCAmelCase_ : Tuple = len(self.fairseq_tokens_to_ids ) UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> str: UpperCAmelCase_ : Any = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: UpperCAmelCase_ : List[str] = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): UpperCAmelCase_ : Optional[Any] = {} UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] 
UpperCAmelCase_ : Any = [self.cls_token_id] UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE ,token_ids_a=_SCREAMING_SNAKE_CASE ,already_has_special_tokens=_SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]: UpperCAmelCase_ : Dict = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def a__ ( self ) -> Optional[Any]: return len(self.fairseq_ids_to_tokens ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]: return self.sp_model.encode(_SCREAMING_SNAKE_CASE ,out_type=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> int: return self.fairseq_ids_to_tokens[index] def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any: UpperCAmelCase_ : Union[str, Any] = ''''''.join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE ,''' ''' ).strip() return out_string def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Tuple = os.path.join( _SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : List[str] = os.path.join( _SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ,) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(_SCREAMING_SNAKE_CASE ,'''wb''' ) as fi: UpperCAmelCase_ : str = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( _SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file ,_SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_SCREAMING_SNAKE_CASE ,'''w''' ,encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(_SCREAMING_SNAKE_CASE )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
30
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __a :List[str] = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , **UpperCAmelCase : List[str] ): super().__init__(**UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type(UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ): if "text_queries" in kwargs: A_ = kwargs.pop("text_queries" ) if isinstance(UpperCAmelCase , (str, Image.Image) ): A_ = {"image": image, "candidate_labels": candidate_labels} else: A_ = image A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase ) return results def __A ( self : int , **UpperCAmelCase : Tuple ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] if "top_k" in kwargs: A_ = kwargs["top_k"] return {}, {}, postprocess_params def __A ( self : List[str] , UpperCAmelCase : Dict ): A_ = load_image(inputs["image"] ) A_ = inputs["candidate_labels"] if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = candidate_labels.split("," ) A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCAmelCase ): A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework ) A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __A ( self : str , UpperCAmelCase : int ): A_ = model_inputs.pop("target_size" ) A_ = model_inputs.pop("candidate_label" ) A_ = model_inputs.pop("is_last" ) A_ = self.model(**UpperCAmelCase ) A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ): A_ = [] for model_output in model_outputs: A_ = model_output["candidate_label"] A_ = BaseModelOutput(UpperCAmelCase ) A_ = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): A_ = outputs["scores"][index].item() A_ = self._get_bounding_box(outputs["boxes"][index][0] ) A_ = {"score": score, "label": label, "box": box} results.append(UpperCAmelCase ) A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase ) if top_k: A_ = results[:top_k] return results def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
86
0
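# A minimal sketch of the special-token layout built by the sequence-building
# method in the tokenizer row above, using illustrative ids (0 for <s>, 2 for
# </s>) rather than the real vocabulary: single sequences become
# `<s> A </s>`; pairs become `<s> A </s></s> B </s>`.
CLS, SEP = 0, 2

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP, SEP] + ids_b + [SEP]

print(build_inputs([10, 11]))        # [0, 10, 11, 2]
print(build_inputs([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]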
import fire from utils import calculate_rouge, save_json def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : List[str] ) -> Any: SCREAMING_SNAKE_CASE_ = [x.strip() for x in open(__UpperCAmelCase ).readlines()] SCREAMING_SNAKE_CASE_ = [x.strip() for x in open(__UpperCAmelCase ).readlines()][: len(__UpperCAmelCase )] SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) if save_path is not None: save_json(__UpperCAmelCase , __UpperCAmelCase , indent=__UpperCAmelCase ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
31
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) __a :int = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } __a :Tuple = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: A_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ): """simple docstring""" A_ = torch.load(__UpperCamelCase ) A_ = WavLMConfigOrig(checkpoint["cfg"] ) A_ = WavLMOrig(__UpperCamelCase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: A_ = WavLMConfig.from_pretrained(__UpperCamelCase ) else: A_ = WavLMConfig() A_ = WavLMModel(__UpperCamelCase ) recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ) hf_wavlm.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __a :Optional[int] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
86
0
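# A minimal sketch of the fused-QKV split that the checkpoint-conversion rows
# above perform: a single (3 * hidden, hidden) input-projection matrix is
# sliced into query, key and value blocks. `hidden_size` here is illustrative.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

assert torch.equal(torch.cat([query, key, value]), in_proj_weight)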
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=32 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=16 , _UpperCamelCase=[32, 64, 128] , _UpperCamelCase=[1, 2, 1] , _UpperCamelCase=[2, 2, 4] , _UpperCamelCase=2 , _UpperCamelCase=2.0 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase="gelu" , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=0.02 , _UpperCamelCase=1e-5 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=10 , _UpperCamelCase=8 , _UpperCamelCase=["stage1", "stage2"] , _UpperCamelCase=[1, 2] , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCamelCase( self ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase( self ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def 
UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = FocalNetModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = FocalNetBackbone(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _UpperCAmelCase = None _UpperCAmelCase = FocalNetBackbone(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = FocalNetForMaskedImageModeling(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = FocalNetForMaskedImageModeling(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(_UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = FocalNetForImageClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = FocalNetForImageClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase( self ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( A__ , A__ , unittest.TestCase ): __A : List[str] = ( ( FocalNetModel, 
FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) __A : Union[str, Any] = ( {"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification} if is_torch_available() else {} ) __A : int = False __A : int = False __A : Dict = False __A : str = False __A : Tuple = False def UpperCamelCase( self ): _UpperCAmelCase = FocalNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 , has_text_modality=_UpperCamelCase ) def UpperCamelCase( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase( self ): return def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase ) @unittest.skip(reason='''FocalNet does not use inputs_embeds''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''FocalNet does not use feedforward chunking''' ) def UpperCamelCase( self ): pass def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _UpperCAmelCase = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _UpperCAmelCase = model_class(_UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) # FocalNet has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // 
patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _UpperCAmelCase = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape _UpperCAmelCase = ( reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _UpperCAmelCase = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _UpperCAmelCase = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) ) @slow def UpperCamelCase( self ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = FocalNetModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = _config_zero_init(_UpperCamelCase ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(config=_UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): @cached_property def UpperCamelCase( self ): # TODO update organization return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None @slow def UpperCamelCase( self ): _UpperCAmelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_UpperCamelCase ) _UpperCAmelCase = self.default_image_processor 
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCAmelCase = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCamelCase ) _UpperCAmelCase = torch.tensor([0.2166, -0.4368, 0.2191] ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Optional[int] = (FocalNetBackbone,) if is_torch_available() else () __A : str = FocalNetConfig __A : Any = False def UpperCamelCase( self ): _UpperCAmelCase = FocalNetModelTester(self )
32
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : int = 0 ): """simple docstring""" A_ = length or len(__UpperCamelCase ) A_ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: A_ , A_ = list_data[i + 1], list_data[i] A_ = True return list_data if not swapped else bubble_sort(__UpperCamelCase ,length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
86
0
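The style_context in this row is an obfuscated recursive bubble sort; the assignments were renamed but the usages were not, so it will not run as written. A readable reconstruction, with names inferred from those usages:

def bubble_sort(data: list, length: int = 0) -> list:
    # One comparison pass per call; recurse over a shorter prefix until a pass makes no swaps.
    length = length or len(data)
    swapped = False
    for i in range(length - 1):
        if data[i] > data[i + 1]:
            data[i], data[i + 1] = data[i + 1], data[i]
            swapped = True
    return data if not swapped else bubble_sort(data, length - 1)

print(bubble_sort([5, 2, 9, 1, 5]))  # [1, 2, 5, 5, 9]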
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase__ : List[Any] = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[Any] = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Any = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[str] ): A_ = torch.nn.Linear(10 , 10 ) A_ = torch.optim.SGD(model.parameters() , 0.1 ) A_ = Accelerator() A_ = accelerator.prepare(UpperCAmelCase ) try: pickle.loads(pickle.dumps(UpperCAmelCase ) ) except Exception as e: self.fail(f'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
86
0
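The RAG __init__ in this row defers its heavy imports through transformers' _LazyModule. The same lazy-import idea can be sketched with only the standard library via PEP 562's module-level __getattr__ (the attribute-to-submodule map here is illustrative, not the actual _LazyModule implementation):

# Goes in a package __init__.py.
import importlib

_LAZY_ATTRS = {"RagConfig": ".configuration_rag", "RagRetriever": ".retrieval_rag"}

def __getattr__(name):
    # Called only when `name` is not found normally, so submodules load on first access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")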
"""simple docstring""" import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class snake_case_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> List[Any]: UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_attention_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_choices def UpperCAmelCase__ ( self) -> List[str]: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCamelCase = None if self.use_attention_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCamelCase = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self) -> Any: UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def UpperCAmelCase__ ( self) -> str: UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class 
snake_case_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" A_ = True A_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self) -> Optional[int]: UpperCamelCase = FlaxRobertaModelTester(self) @slow def UpperCAmelCase__ ( self) -> Dict: for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=lowerCamelCase_) UpperCamelCase = model(np.ones((1, 1))) self.assertIsNotNone(lowerCamelCase_)
34
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a :List[str] = logging.get_logger(__name__) __a :Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __a :Any = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ = None for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True elif name.split("." )[0] == "proj": A_ = fairseq_model.proj A_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) return proj_weight def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ , A_ = emb.weight.shape A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase ) A_ = emb.weight.data return lin_layer def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.split(" " )[0] for line in lines] A_ = len(__UpperCamelCase ) A_ = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,): """simple docstring""" A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase ) A_ = SpeechaTextaConfig.from_pretrained( __UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase ) A_ = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) A_ , A_ , A_ = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ = model[0].eval() # set weights for wav2vec2 encoder A_ = WavaVecaModel(__UpperCamelCase ) A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase ) A_ = SpeechaTextaForCausalLM(__UpperCamelCase ) A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase ) A_ = False # add projection layer A_ = nn.Parameter(projection_layer.weight ) A_ = nn.Parameter(projection_layer.bias ) A_ = create_vocab_dict(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) ) tokenizer.save_pretrained(__UpperCamelCase ) A_ = hf_wavavec.config.to_dict() A_ = tokenizer.pad_token_id A_ = tokenizer.bos_token_id A_ = tokenizer.eos_token_id A_ = "speech_to_text_2" A_ = "wav2vec2" A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) feature_extractor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') __a :Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
86
0
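One helper in the wav2vec2-to-SpeechEncoderDecoder conversion above turns a decoder embedding matrix into a bias-free output projection. Reconstructed with readable names (bias=False is inferred; the obfuscated original passes a mangled placeholder there):

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    # nn.Linear(in, out) stores its weight as (out, in), which matches emb.weight's (vocab, hidden).
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer

emb = nn.Embedding(10, 4)
proj = make_linear_from_emb(emb)
assert torch.equal(proj.weight, emb.weight)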
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ :Optional[Any] = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :Union[str, Any] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys a_ :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
35
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __a :str = logging.get_logger(__name__) __a :Any = Dict[str, Any] __a :int = List[Prediction] @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __A ( self : str , **UpperCAmelCase : str ): A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return super().__call__(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Any ): A_ = load_image(UpperCAmelCase ) A_ = torch.IntTensor([[image.height, image.width]] ) A_ = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) A_ = target_size return inputs def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = model_inputs.pop("target_size" ) A_ = self.model(**UpperCAmelCase ) A_ = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: A_ = model_inputs["bbox"] return model_outputs def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ): A_ = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A_ , A_ = target_size[0].tolist() def unnormalize(UpperCAmelCase : Any ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )] A_ = ["score", "label", "box"] A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = raw_annotations[0] A_ = raw_annotation["scores"] A_ = raw_annotation["labels"] A_ = raw_annotation["boxes"] A_ = scores.tolist() A_ = [self.model.config.idalabel[label.item()] for label in labels] A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A_ = ["score", "label", "box"] A_ = [ dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) A_ , A_ , A_ , A_ = box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
86
0
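The LayoutLM branch of the object-detection pipeline above maps boxes from the model's 0-1000 grid back to pixel coordinates before building the annotations. The rescaling arithmetic, isolated as plain Python:

def unnormalize_box(bbox, width, height):
    # LayoutLM-style models emit (xmin, ymin, xmax, ymax) on a 0-1000 grid regardless of image size.
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]

print(unnormalize_box([100, 200, 300, 400], width=640, height=480))
# [64.0, 96.0, 192.0, 192.0]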
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--original_config_file''', type=str, required=True, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--image_size''', default=512, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') def lowercase ( __A : Tuple ) -> Union[str, Any]: '''simple docstring''' if string == "True": return True elif string == "False": return False else: raise ValueError(f"""could not parse string as bool {string}""" ) parser.add_argument( '''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool ) parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int) __lowercase : Union[str, Any] = parser.parse_args() __lowercase : Optional[int] = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
36
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ , A_ = image.size A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image[None].transpose(0 ,3 ,1 ,2 ) A_ = torch.from_numpy(__UpperCamelCase ) return 2.0 * image - 1.0 class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = 1 elif isinstance(UpperCAmelCase , torch.Tensor ): A_ = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' ) if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = preprocess(UpperCAmelCase ) A_ , A_ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image A_ = (batch_size, self.unet.config.in_channels // 2, height, width) A_ = next(self.unet.parameters() ).dtype A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) A_ = image.to(device=self.device , dtype=UpperCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCAmelCase , device=self.device ) A_ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta for t in self.progress_bar(UpperCAmelCase ): # concat latents and low resolution image in the channel dimension. 
A_ = torch.cat([latents, image] , dim=1 ) A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # decode the image latents with the VQVAE A_ = self.vqvae.decode(UpperCAmelCase ).sample A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 ) A_ = image / 2 + 0.5 A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
86
0
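The super-resolution pipeline in this row preprocesses PIL input by snapping width and height down to multiples of 32 and normalizing to [-1, 1]. A de-mangled sketch, assuming an RGB input image:

import numpy as np
import torch
from PIL import Image

def preprocess(image: Image.Image) -> torch.Tensor:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # snap both sides down to a multiple of 32
    image = image.resize((w, h), resample=Image.LANCZOS)
    arr = np.array(image).astype(np.float32) / 255.0  # (H, W, 3) in [0, 1]
    arr = arr[None].transpose(0, 3, 1, 2)             # (1, 3, H, W)
    return 2.0 * torch.from_numpy(arr) - 1.0          # map to [-1, 1]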
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
37
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)] def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution __a :list[bool | None] = [None] * 1000_0000 __a :Optional[Any] = True __a :List[Any] = False def __snake_case ( __UpperCamelCase : int ): """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore A_ = chain(next_number(__UpperCamelCase ) ) A_ = number_chain while number < 1000_0000: A_ = number_chain number *= 10 return number_chain def __snake_case ( __UpperCamelCase : int = 1000_0000 ): """simple docstring""" for i in range(1 ,__UpperCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F"{solution() = }")
86
0
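The Project Euler solution above speeds up the squared-digit-sum step by precomputing it for every five-digit chunk. The core trick, reconstructed with readable names and a quick sanity check:

DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]

def next_number(number: int) -> int:
    # Consume five digits per iteration via the precomputed table instead of one at a time.
    total = 0
    while number:
        total += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return total

assert next_number(44) == 32    # 4**2 + 4**2
assert next_number(145) == 42   # 1 + 16 + 25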
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = CycleDiffusionPipeline lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''} lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __UpperCamelCase ( self ): torch.manual_seed(0 ) snake_case__ : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , ) snake_case__ : List[str] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_0_0_0 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) snake_case__ : List[Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) snake_case__ : Any = CLIPTextModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case__ : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ): snake_case__ : List[str] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = image / 2 + 0.5 if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): snake_case__ : Union[str, Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: snake_case__ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) snake_case__ : Any = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, 
"""source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def __UpperCamelCase ( self ): snake_case__ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : int = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) snake_case__ : Any = pipe(**__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = output.images snake_case__ : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 3_2, 3_2, 3) snake_case__ : Optional[int] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __UpperCamelCase ( self ): snake_case__ : str = self.get_dummy_components() for name, module in components.items(): if hasattr(__SCREAMING_SNAKE_CASE , """half""" ): snake_case__ : Dict = module.half() snake_case__ : Any = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = pipe(**__SCREAMING_SNAKE_CASE ) snake_case__ : int = output.images snake_case__ : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 3_2, 3_2, 3) snake_case__ : Tuple = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __UpperCamelCase ( self ): return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def __UpperCamelCase ( self ): return super().test_inference_batch_single_identical() @skip_mps def __UpperCamelCase ( self ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def __UpperCamelCase ( self ): return super().test_save_load_optional_components() @skip_mps def __UpperCamelCase ( self ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self ): snake_case__ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) snake_case__ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) snake_case__ : Dict = init_image.resize((5_1_2, 5_1_2) ) snake_case__ : Tuple = """CompVis/stable-diffusion-v1-4""" snake_case__ : Any = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder="""scheduler""" ) snake_case__ : Optional[Any] = CycleDiffusionPipeline.from_pretrained( __SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() snake_case__ : Optional[int] = """A black colored car""" snake_case__ : int = """A blue colored 
car""" snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Any = pipe( prompt=__SCREAMING_SNAKE_CASE , source_prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) snake_case__ : List[Any] = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def __UpperCamelCase ( self ): snake_case__ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) snake_case__ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) snake_case__ : Any = init_image.resize((5_1_2, 5_1_2) ) snake_case__ : List[str] = """CompVis/stable-diffusion-v1-4""" snake_case__ : Dict = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder="""scheduler""" ) snake_case__ : Dict = CycleDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() snake_case__ : Tuple = """A black colored car""" snake_case__ : List[str] = """A blue colored car""" snake_case__ : Tuple = torch.manual_seed(0 ) snake_case__ : List[str] = pipe( prompt=__SCREAMING_SNAKE_CASE , source_prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) snake_case__ : int = output.images assert np.abs(image - expected_image ).max() < 2e-2
38
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :List[Any] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
0
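The slow CycleDiffusion test in this row doubles as a usage recipe. The same call pattern as a standalone sketch; model id, image URL, and hyperparameters are copied from that test (a CUDA device and network access are assumed, and the run downloads full Stable Diffusion weights):

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
pipe.to("cuda")

output = pipe(
    prompt="A blue colored car",          # what the edited image should show
    source_prompt="A black colored car",  # what the input image shows
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
    output_type="np",
)
edited = output.images[0]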
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    expected_slice = None
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    # no reference slices are defined for the large/xlarge checkpoints, so skip the check for them
    if expected_slice is not None:
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
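A quick sanity sketch for rename_key above; the checkpoint keys are illustrative examples of the original FocalNet naming scheme (not pulled from a real state dict) and simply exercise the main branches:

# run after the definitions above
assert rename_key("patch_embed.proj.weight") == "focalnet.embeddings.patch_embeddings.projection.weight"
assert rename_key("layers.0.blocks.0.modulation.f.weight") == "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"
assert rename_key("norm.weight") == "focalnet.layernorm.weight"
assert rename_key("head.bias") == "classifier.bias"  # the classification head gets no "focalnet." prefix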
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
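For context, a hedged usage sketch: this formatter is what backs `Dataset.with_format("jax")` in the `datasets` library. The toy dataset below is illustrative, and the dtypes in the comments assume the default jax config (x64 disabled):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "label": [0, 1]})
ds = ds.with_format("jax")  # rows, columns, and batches now come back as jax.Array values

row = ds[0]       # e.g. {"x": Array([1., 2.], dtype=float32), "label": Array(0, dtype=int32)}
column = ds["x"]  # entries share shape/dtype, so _consolidate stacks them into one (2, 2) Array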