| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
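For orientation, a minimal usage sketch of the class above; this assumes the standard transformers from_pretrained API, and the "gpt2" checkpoint name is taken from the pretrained map defined earlier in this cell:
from transformers import GPT2TokenizerFast
tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# With add_prefix_space=True, pretokenized input passes the assertion in _batch_encode_plus.
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)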
| 67 | """simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis reduces the top operator
            # with the top two operands
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5: the final value is the lone operand left on the stack
    return operand_stack.peek()
if __name__ == "__main__":
equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
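Note that Stack comes from the repository's own .stack module; a minimal stand-in with just the three operations the algorithm uses would be:
class Stack:
    def __init__(self):
        self._items = []
    def push(self, item):
        self._items.append(item)
    def pop(self):
        return self._items.pop()
    def peek(self):
        return self._items[-1]
Tracing the example: the inner groups reduce as 4 * 2 = 8 and 2 + 3 = 5, then 8 * 5 = 40, and finally 5 + 40 = 45, matching the answer in the comment. (Each operand is a single digit, which is all the character-by-character isdigit() scan supports.)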
| 67 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="resnet50" , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , ):
__a = parent
__a = out_indices if out_indices is not None else [4]
__a = stage_names
__a = out_features
__a = backbone
__a = batch_size
__a = image_size
__a = num_channels
__a = use_pretrained_backbone
__a = is_training
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = self.get_config()
return config, pixel_values
def a__ ( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = TimmBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = (TimmBackbone,) if is_torch_available() else ()
_snake_case : int = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
_snake_case : Optional[Any] = False
_snake_case : str = False
_snake_case : int = False
_snake_case : str = False
def a__ ( self ):
__a = TimmBackboneModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def a__ ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ):
__a = "resnet18"
__a = "microsoft/resnet-18"
__a = AutoBackbone.from_pretrained(lowerCamelCase , use_timm_backbone=lowerCamelCase )
__a = AutoBackbone.from_pretrained(lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__a = AutoBackbone.from_pretrained(lowerCamelCase , use_timm_backbone=lowerCamelCase , out_indices=[1, 2, 3] )
__a = AutoBackbone.from_pretrained(lowerCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def a__ ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def a__ ( self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def a__ ( self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def a__ ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def a__ ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def a__ ( self ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def a__ ( self ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def a__ ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = self.has_attentions
# no need to test all models as different heads yield the same functionality
__a = self.all_model_classes[0]
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__a = model(**lowerCamelCase )
__a = outputs[0][-1]
# Encoder-/Decoder-only models
__a = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__a = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(**lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__a = copy.deepcopy(lowerCamelCase )
__a = None
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(**lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__a = copy.deepcopy(lowerCamelCase )
__a = False
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(**lowerCamelCase )
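A condensed usage sketch of the backbone API these tests exercise; the "resnet18" checkpoint and the use_timm_backbone/out_indices arguments mirror the timm-vs-transformers equivalence test above:
import torch
from transformers import AutoBackbone
backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.rand(1, 3, 224, 224)
outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one feature map per requested stage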
| 67 | """simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
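Worked check: arc_length(90, 10) = 2π · 10 · (90 / 360) = 5π ≈ 15.708, i.e. a quarter of the full circumference 2π · 10 ≈ 62.832.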
| 67 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments plus the TPU core count
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
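An invocation sketch (the launcher filename is an assumption; the flags are the ones parsed above, and train.py is a placeholder):
# python xla_spawn.py --num_cores 8 train.py --learning_rate 5e-5
# Each of the 8 TPU processes then runs train.py's _mp_fn with
# sys.argv == ["train.py", "--learning_rate", "5e-5", "--tpu_num_cores", "8"]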
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
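To see the transform chain these methods compose, a standalone sketch built from the same functional helpers imported at the top of this cell (the intermediate 256-pixel resize is illustrative; the real method scales the shortest edge by 256/224):
import numpy as np
from transformers.image_transforms import center_crop, normalize, rescale, resize
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
image = resize(image, size=(256, 256))
image = center_crop(image, size=(224, 224))
image = rescale(image, scale=1 / 255)
image = normalize(image, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)
print(image.shape)  # (224, 224, 3), channels-last until to_channel_dimension_format runs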
| 67 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
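Stripped of the test harness, the fast path above amounts to this short generation script (the tiny UNet config is copied from dummy_uncond_unet):
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
unet = UNet2DModel(
    block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=2, generator=torch.manual_seed(0), output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)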
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
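A condensed sketch of the interpolate_pos_encoding path the last two tests exercise (checkpoint name from the tests; the random input stands in for the COCO fixture image):
import torch
from transformers import ViTModel
model = ViTModel.from_pretrained("facebook/dino-vits8")
pixel_values = torch.rand(1, 3, 480, 480)  # larger than the 224px pretraining resolution
with torch.no_grad():
    outputs = model(pixel_values, interpolate_pos_encoding=True)
print(outputs.last_hidden_state.shape)  # (1, 3601, 384): 60 * 60 patches of 8 px plus [CLS]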
| 67 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class snake_case__ ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ):
super().__init__(features=lowerCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(
F"Expected {device} to be a `str` not {type(lowerCamelCase )}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
__a = device if isinstance(lowerCamelCase , lowerCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
__a = str(jax.devices()[0] )
__a = jnp_array_kwargs
@staticmethod
def a__ ( ):
import jax
return {str(lowerCamelCase ): device for device in jax.devices()}
def a__ ( self , lowerCamelCase ):
import jax
import jax.numpy as jnp
if isinstance(lowerCamelCase , lowerCamelCase ) and column:
if all(
isinstance(lowerCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(lowerCamelCase , axis=0 )
return column
def a__ ( self , lowerCamelCase ):
import jax
import jax.numpy as jnp
if isinstance(value , (str, bytes, type(None )) ):
return value
elif isinstance(lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
default_dtype = {}
if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase , PIL.Image.Image ):
__a = np.asarray(lowerCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(lowerCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def a__ ( self , lowerCamelCase ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(lowerCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(lowerCamelCase , "__array__" ) and not isinstance(lowerCamelCase , jax.Array ):
__a = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase ) for substruct in data_struct] )
elif isinstance(lowerCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
return map_nested(self._recursive_tensorize , lowerCamelCase , map_list=lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = self.numpy_arrow_extractor().extract_row(lowerCamelCase )
__a = self.python_features_decoder.decode_row(lowerCamelCase )
return self.recursive_tensorize(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = self.numpy_arrow_extractor().extract_column(lowerCamelCase )
__a = self.python_features_decoder.decode_column(lowerCamelCase , pa_table.column_names[0] )
__a = self.recursive_tensorize(lowerCamelCase )
__a = self._consolidate(lowerCamelCase )
return column
def a__ ( self , lowerCamelCase ):
__a = self.numpy_arrow_extractor().extract_batch(lowerCamelCase )
__a = self.python_features_decoder.decode_batch(lowerCamelCase )
__a = self.recursive_tensorize(lowerCamelCase )
for column_name in batch:
__a = self._consolidate(batch[column_name] )
return batch
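In practice this formatter is reached through the datasets with_format API rather than instantiated directly; a minimal sketch, assuming jax is installed:
from datasets import Dataset
ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
print(type(ds[0]["x"]))  # a jax array, materialized on the formatter's device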
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
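An invocation sketch (the script filename is an assumption and the paths are placeholders):
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert/pytorch_model.bin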
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
def run_and_check(self, args):
    n_gpu = get_gpu_count()
    if n_gpu > 1:
        pass
        # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
        # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
        # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
        # cmd = [sys.executable] + distributed_args + args
        # execute_subprocess_async(cmd, env=self.get_env())
        # XXX: test the results - need to save them first into .json file
    else:
        args.insert(0, "run_glue_deebert.py")
        with patch.object(sys, "argv", args):
            result = run_glue_deebert.main()
            for value in result.values():
                self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(__a )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(__a )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(__a )
| 67 | 1 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = BertJapaneseTokenizer
_snake_case : Union[str, Any] = False
_snake_case : str = True
def a__ ( self ):
super().setUp()
__a = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def a__ ( self , lowerCamelCase ):
__a = "こんにちは、世界。 \nこんばんは、世界。"
__a = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def a__ ( self , lowerCamelCase ):
__a , __a = self.get_input_output_texts(lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.decode(lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
return text, ids
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def a__ ( self ):
__a = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(lowerCamelCase )
__a = "こんにちは、世界。\nこんばんは、世界。"
__a = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__a = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCamelCase , "wb" ) as handle:
pickle.dump(lowerCamelCase , lowerCamelCase )
with open(lowerCamelCase , "rb" ) as handle:
__a = pickle.load(lowerCamelCase )
__a = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def a__ ( self ):
try:
__a = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def a__ ( self ):
try:
__a = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def a__ ( self ):
__a = MecabTokenizer(do_lower_case=lowerCamelCase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def a__ ( self ):
try:
__a = MecabTokenizer(
do_lower_case=lowerCamelCase , normalize_text=lowerCamelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def a__ ( self ):
__a = MecabTokenizer(normalize_text=lowerCamelCase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def a__ ( self ):
__a = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(lowerCamelCase )
__a = "こんにちは、世界。\nこんばんは、世界。"
__a = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__a = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCamelCase , "wb" ) as handle:
pickle.dump(lowerCamelCase , lowerCamelCase )
with open(lowerCamelCase , "rb" ) as handle:
__a = pickle.load(lowerCamelCase )
__a = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(do_lower_case=lowerCamelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(normalize_text=lowerCamelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def a__ ( self ):
__a = SudachiTokenizer(trim_whitespace=lowerCamelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def a__ ( self ):
__a = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(lowerCamelCase )
__a = "こんにちは、世界。\nこんばんは、世界。"
__a = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__a = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCamelCase , "wb" ) as handle:
pickle.dump(lowerCamelCase , lowerCamelCase )
with open(lowerCamelCase , "rb" ) as handle:
__a = pickle.load(lowerCamelCase )
__a = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@require_jumanpp
def a__ ( self ):
__a = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def a__ ( self ):
__a = JumanppTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def a__ ( self ):
__a = JumanppTokenizer(normalize_text=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def a__ ( self ):
__a = JumanppTokenizer(trim_whitespace=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def a__ ( self ):
__a = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def a__ ( self ):
__a = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
__a = {}
for i, token in enumerate(lowerCamelCase ):
__a = i
__a = WordpieceTokenizer(vocab=lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def a__ ( self ):
__a = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
__a = tokenizer.subword_tokenizer
__a = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(lowerCamelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
__a = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(lowerCamelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def a__ ( self ):
__a = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
__a = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : Dict = BertJapaneseTokenizer
_snake_case : str = False
def a__ ( self ):
super().setUp()
__a = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def a__ ( self , **lowerCamelCase ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = "こんにちは、世界。 \nこんばんは、世界。"
__a = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
__a = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
__a = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
lowerCamelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def a__ ( self ):
__a = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
__a = {}
for i, token in enumerate(lowerCamelCase ):
__a = i
__a = CharacterTokenizer(vocab=lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def a__ ( self ):
__a = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
__a = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
__a = "cl-tohoku/bert-base-japanese"
__a = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
__a = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
__a = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
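# Resize the image so its shortest edge matches size["shortest_edge"], preserving the aspect ratio.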
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""" ) ) is not True, reason="""Skipping test because should only be run when releasing minor transformers version""", )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=lowerCamelCase , )
assert hasattr(self , "env" )
def a__ ( self , lowerCamelCase ):
# configuration for running training on smdistributed Model Parallel
__a = {
"enabled": True,
"processes_per_host": 8,
}
__a = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__a = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__a = "trainer" if self.script == "run_glue.py" else "smtrainer"
# create the estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase , py_version="py36" , )
def a__ ( self , lowerCamelCase ):
TrainingJobAnalytics(lowerCamelCase ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def a__ ( self , lowerCamelCase ):
# create estimator
__a = self.create_estimator(lowerCamelCase )
# run training
estimator.fit()
# result dataframe
__a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__a = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from the SageMaker job; this includes starting, preprocessing, and stopping
__a = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
def _lowerCamelCase( a ):
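# Derive a DPTConfig (and the expected output shape) from hints in the checkpoint URL.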
__a = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
__a = 1_0_2_4
__a = 4_0_9_6
__a = 2_4
__a = 1_6
__a = [5, 1_1, 1_7, 2_3]
__a = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
__a = (1, 3_8_4, 3_8_4)
if "nyu" or "midas" in checkpoint_url:
__a = 7_6_8
__a = [1, 1, 1, 0.5]
__a = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
__a = 1_5_0
__a = 1_6
__a = (1, 3_8_4, 3_8_4)
__a = False
__a = "project"
if "ade" in checkpoint_url:
__a = True
__a = 7_6_8
__a = [1, 1, 1, 0.5]
__a = 1_5_0
__a = 1_6
__a = "huggingface/label-files"
__a = "ade20k-id2label.json"
__a = json.load(open(cached_download(hf_hub_url(a , a , repo_type="dataset" ) ) , "r" ) )
__a = {int(a ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def _lowerCamelCase( a ):
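# Drop the original classification-head weights, which have no counterpart in the converted model.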
__a = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(a , a )
def _lowerCamelCase( a ):
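# Translate one key of the original state dict into its Transformers equivalent.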
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__a = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
__a = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
__a = name.replace("patch_embed" , "" )
if "pos_embed" in name:
__a = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
__a = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
__a = name.replace("proj" , "projection" )
if "blocks" in name:
__a = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
__a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__a = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
__a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
__a = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
__a = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
__a = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
__a = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
__a = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
__a = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
__a = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
__a = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__a = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
__a = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
__a = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
__a = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
__a = name.replace("conv1" , "convolution1" )
if "conv2" in name:
__a = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__a = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
__a = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
__a = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
__a = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__a = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
__a = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
__a = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
__a = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
__a = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
__a = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
__a = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
__a = name.replace("pretrained" , "dpt" )
if "bn" in name:
__a = name.replace("bn" , "batch_norm" )
if "head" in name:
__a = name.replace("head" , "head.head" )
if "encoder.norm" in name:
__a = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
__a = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
__a = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
__a = name.replace(".." , "." )
if "stem.conv" in name:
__a = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__a = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
__a = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
__a = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
__a = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
__a = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
__a = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def _lowerCamelCase( a , a ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
__a = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__a = in_proj_weight[: config.hidden_size, :]
__a = in_proj_bias[: config.hidden_size]
__a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a = in_proj_weight[
-config.hidden_size :, :
]
__a = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase( ):
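# Download a standard COCO image to sanity-check the converted model.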
__a = "http://images.cocodataset.org/val2017/000000039769.jpg"
__a = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def _lowerCamelCase( a , a , a , a , a ):
__a , __a = get_dpt_config(a )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__a = torch.load(a , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(a )
# rename keys
for key in state_dict.copy().keys():
__a = state_dict.pop(a )
__a = val
# read in qkv matrices
read_in_q_k_v(a , a )
# load HuggingFace model
__a = DPTForSemanticSegmentation(a ) if "ade" in checkpoint_url else DPTForDepthEstimation(a )
model.load_state_dict(a )
model.eval()
# Check outputs on an image
__a = 4_8_0 if "ade" in checkpoint_url else 3_8_4
__a = DPTImageProcessor(size=a )
__a = prepare_img()
__a = image_processor(a , return_tensors="pt" )
# forward pass
__a = model(**a ).logits if "ade" in checkpoint_url else model(**a ).predicted_depth
if show_prediction:
__a = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=a , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(a ).mkdir(exist_ok=a )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(a )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE__:List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """perceiver"""
def __init__( self , lowerCamelCase=256 , lowerCamelCase=1280 , lowerCamelCase=768 , lowerCamelCase=1 , lowerCamelCase=26 , lowerCamelCase=8 , lowerCamelCase=8 , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="kv" , lowerCamelCase=1 , lowerCamelCase=1 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=262 , lowerCamelCase=2048 , lowerCamelCase=56 , lowerCamelCase=[368, 496] , lowerCamelCase=16 , lowerCamelCase=1920 , lowerCamelCase=16 , lowerCamelCase=[1, 16, 224, 224] , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = num_latents
__a = d_latents
__a = d_model
__a = num_blocks
__a = num_self_attends_per_block
__a = num_self_attention_heads
__a = num_cross_attention_heads
__a = qk_channels
__a = v_channels
__a = cross_attention_shape_for_attention
__a = self_attention_widening_factor
__a = cross_attention_widening_factor
__a = hidden_act
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = use_query_residual
# masked language modeling attributes
__a = vocab_size
__a = max_position_embeddings
# image classification attributes
__a = image_size
# flow attributes
__a = train_size
# multimodal autoencoding attributes
__a = num_frames
__a = audio_samples_per_frame
__a = samples_per_patch
__a = output_shape
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def a__ ( self ):
return 1E-4
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = 3 , lowerCamelCase = 40 , lowerCamelCase = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(lowerCamelCase , lowerCamelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = preprocessor.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join(["a"] ) * seq_length] * batch_size
__a = dict(preprocessor(lowerCamelCase , return_tensors=lowerCamelCase ) )
__a = inputs.pop("input_ids" )
return inputs
elif isinstance(lowerCamelCase , lowerCamelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
__a = self._generate_dummy_images(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = dict(preprocessor(images=lowerCamelCase , return_tensors=lowerCamelCase ) )
__a = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
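# Divide and conquer: the maximum of nums[left..right] is the larger of the maxima of its two halves.
# e.g. find_max([3, 1, 4, 1, 5], 0, 4) -> 5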
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class snake_case__ :
_snake_case : float
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
def _lowerCamelCase( a ):
# Validation
def is_valid_tree(a ) -> bool:
if node is None:
return True
if not isinstance(a , a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(a ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
def is_binary_search_tree_recursive_check(
a , a , a ) -> bool:
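# A node is valid iff its value lies strictly between the bounds inherited from its ancestors.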
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , a , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , a )
)
return is_binary_search_tree_recursive_check(a , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""ChineseCLIPFeatureExtractor"""]
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
from typing import Any
import numpy as np
def _lowerCamelCase( a ):
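# A matrix is Hermitian iff it equals its own conjugate transpose.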
return np.array_equal(a , matrix.conjugate().T )
def _lowerCamelCase( a , a ):
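# Rayleigh quotient (v* M v) / (v* v); guaranteed to be real when M is Hermitian.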
__a = v.conjugate().T
__a = v_star.dot(a )
assert isinstance(a , np.ndarray )
return (v_star_dot.dot(a )) / (v_star.dot(a ))
def _lowerCamelCase( ):
__a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
__a = np.array([[1], [2], [3]] )
assert is_hermitian(a ), F"{a} is not hermitian."
print(rayleigh_quotient(a , a ) )
__a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a ), F"{a} is not hermitian."
assert rayleigh_quotient(a , a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
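# Enumerate the (x, y) coordinates of every pixel in the image, in row-major order.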
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
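# Map pixel coordinates to (origin, direction) world-space rays; directions are normalized to unit length.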
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
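# Place 20 cameras on a circle of radius 4 around the origin, each looking inward with a slight downward tilt.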
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Dict = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Any = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[Any] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
def _lowerCamelCase( a ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _lowerCamelCase( a ):
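# A Krishnamurthy number equals the sum of the factorials of its digits, e.g. 145 = 1! + 4! + 5!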
__a = 0
__a = number
while duplicate > 0:
__a , __a = divmod(a , 1_0 )
fact_sum += factorial(a )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
SCREAMING_SNAKE_CASE__:Tuple = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def _lowerCamelCase( a , a = 1 , a = "new" , a = None ):
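# Query Reddit's public JSON endpoint for `limit` posts from a subreddit, optionally keeping only the requested fields.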
__a = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ):
__a = F"Invalid search term: {invalid_search_terms}"
raise ValueError(a )
__a = requests.get(
F"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={"User-agent": "A random string"} , )
if response.status_code == 4_2_9:
raise requests.HTTPError
__a = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(a )}
__a = {}
for id_ in range(a ):
__a = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, you are being rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a ):
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(a , a ):
raise TypeError("Input value must be a 'int' type" )
return bin(a ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | """simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _lowerCamelCase( a , a , a ):
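# Split the combined LDM checkpoint into VQ-VAE and UNet state dicts, then build and save a diffusers LDMPipeline.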
__a = OmegaConf.load(a )
__a = torch.load(a , map_location="cpu" )["model"]
__a = list(state_dict.keys() )
# extract state_dict for VQVAE
__a = {}
__a = "first_stage_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
# extract state_dict for UNetLDM
__a = {}
__a = "model.diffusion_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
__a = config.model.params.first_stage_config.params
__a = config.model.params.unet_config.params
__a = VQModel(**a ).eval()
vqvae.load_state_dict(a )
__a = UNetLDMModel(**a ).eval()
unet.load_state_dict(a )
__a = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , )
__a = LDMPipeline(a , a , a )
pipeline.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 67 | 1 |
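# A minimal smoke test for a pipeline converted by the script above (the local path is a
# placeholder, not part of the original script):
#     from diffusers import LDMPipeline
#     pipe = LDMPipeline.from_pretrained("./ldm-converted")
#     image = pipe(num_inference_steps=25).images[0]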
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = """Speech2TextFeatureExtractor"""
_snake_case : Dict = """Speech2TextTokenizer"""
def __init__( self , lowerCamelCase , lowerCamelCase ):
super().__init__(lowerCamelCase , lowerCamelCase )
__a = self.feature_extractor
__a = False
def __call__( self , *lowerCamelCase , **lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase , **lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
__a = kwargs.pop("raw_speech" )
else:
__a = kwargs.pop("audio" , lowerCamelCase )
__a = kwargs.pop("sampling_rate" , lowerCamelCase )
__a = kwargs.pop("text" , lowerCamelCase )
if len(lowerCamelCase ) > 0:
__a = args[0]
__a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__a = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase )
if text is not None:
__a = self.tokenizer(lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a = encodings["input_ids"]
return inputs
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@contextmanager
def a__ ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
__a = True
__a = self.tokenizer
yield
__a = self.feature_extractor
__a = False
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | 1 |
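# A sketch of driving the dummy-input generation defined above. The restored class name
# BlenderbotSmallOnnxConfig is an assumption recovered from the mangled `snake_case__`;
# the checkpoint name comes from the config map at the top of the file:
#     from transformers import AutoTokenizer, BlenderbotSmallConfig
#     from transformers.utils import TensorType
#     onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="seq2seq-lm")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)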
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__)
def _lowerCamelCase( a , a=False ):
__a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__a = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _lowerCamelCase( a , a , a=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__a = ""
else:
__a = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
__a = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__a = in_proj_weight[
: config.hidden_size, :
]
__a = in_proj_bias[: config.hidden_size]
__a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a = in_proj_weight[
-config.hidden_size :, :
]
__a = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase( a ):
__a = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(a , a )
def _lowerCamelCase( a , a , a ):
__a = dct.pop(a )
__a = val
def _lowerCamelCase( ):
__a = "http://images.cocodataset.org/val2017/000000039769.jpg"
__a = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def _lowerCamelCase( a , a , a=True ):
__a = ViTConfig()
# patch_size
if model_name[-1] == "8":
__a = 8
# set labels if required
if not base_model:
__a = 1_0_0_0
__a = "huggingface/label-files"
__a = "imagenet-1k-id2label.json"
__a = json.load(open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) )
__a = {int(a ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__a = 3_8_4
__a = 1_5_3_6
__a = 1_2
__a = 6
# load original model from torch hub
__a = torch.hub.load("facebookresearch/dino:main" , a )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__a = original_model.state_dict()
if base_model:
remove_classification_head_(a )
__a = create_rename_keys(a , base_model=a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a , a )
# load HuggingFace model
if base_model:
__a = ViTModel(a , add_pooling_layer=a ).eval()
else:
__a = ViTForImageClassification(a ).eval()
model.load_state_dict(a )
# Check outputs on an image, prepared by ViTImageProcessor
__a = ViTImageProcessor()
__a = image_processor(images=prepare_img() , return_tensors="pt" )
__a = encoding["pixel_values"]
__a = model(a )
if base_model:
__a = original_model(a )
assert torch.allclose(a , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
__a = original_model(a )
assert logits.shape == outputs.logits.shape
assert torch.allclose(a , outputs.logits , atol=1E-3 )
Path(a ).mkdir(exist_ok=a )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
SCREAMING_SNAKE_CASE__:Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
            # Explicitly pass decoder_head_mask, as it is required by the T5 model when head_mask is specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
def solution( n = 1_0_0 ):
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
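# A brute-force cross-check of the closed form above; the helper name is ours, not the source's.
def solution_bruteforce( n = 1_0_0 ):
    return sum(range(1 , n + 1 )) ** 2 - sum(i * i for i in range(1 , n + 1 ))
assert solution() == solution_bruteforce() == 2_5_1_6_4_1_5_0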
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a ):
# Initialise PyTorch model
__a = MobileBertConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
__a = MobileBertForPreTraining(a )
# Load weights from tf checkpoint
__a = load_tf_weights_in_mobilebert(a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 67 | 1 |
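# Typical invocation of the converter above (script name as in the transformers repo;
# paths are placeholders):
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin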
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : List[str] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **lowerCamelCase ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__a = deprecated_arg[3:]
__a = not kwargs.pop(lowerCamelCase )
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}" )
__a = kwargs.pop("tpu_name" , self.tpu_name )
__a = kwargs.pop("device_idx" , self.device_idx )
__a = kwargs.pop("eager_mode" , self.eager_mode )
__a = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**lowerCamelCase )
_snake_case : str = field(
default=snake_case_, metadata={"""help""": """Name of TPU"""}, )
_snake_case : int = field(
default=0, metadata={"""help""": """CPU / GPU device index. Defaults to 0."""}, )
    _snake_case : bool = field(default=snake_case_, metadata={"""help""": """Benchmark models in eager mode."""} )
_snake_case : bool = field(
default=snake_case_, metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
}, )
@cached_property
def a__ ( self ):
requires_backends(self , ["tf"] )
__a = None
if self.tpu:
try:
if self.tpu_name:
__a = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__a = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__a = None
return tpu
@cached_property
def a__ ( self ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__a = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
__a = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
__a = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" )
return strategy
@property
def a__ ( self ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def a__ ( self ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def a__ ( self ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def a__ ( self ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def a__ ( self ):
return self.n_gpu > 0
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
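# Illustrative lines for the two checks above (examples are ours, not from the source):
#     open(path)                        -> flagged by _no_encoding_on_file_open
#     open(path, encoding="utf-8")      -> allowed (explicit encoding)
#     open(path, "rb")                  -> allowed (binary mode)
#     print("debug")                    -> flagged by _no_print_statements
#     # print("debug")                  -> ignored (commented out)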
"""simple docstring"""
def triangle_number_generator():
    for n in range(1 , 1_0_0_0_0_0_0 ):
        yield n * (n + 1) // 2
def count_divisors( n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
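# Worked example for count_divisors: 28 = 2**2 * 7, so the count is (2 + 1) * (1 + 1) = 6,
# matching the divisors {1, 2, 4, 7, 14, 28}.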
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "weight" in name:
__a = "weight"
elif "bias" in name:
__a = "bias"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
@torch.no_grad()
def _lowerCamelCase( a , a , a=None , a=None , a=True ):
if config_path is not None:
__a = HubertConfig.from_pretrained(a )
else:
__a = HubertConfig()
if is_finetuned:
if dict_path:
__a = Dictionary.load(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a = target_dict.pad_index
__a = target_dict.bos_index
__a = target_dict.eos_index
__a = len(target_dict.symbols )
__a = os.path.join(a , "vocab.json" )
if not os.path.isdir(a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(a ) )
return
os.makedirs(a , exist_ok=a )
with open(a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , a )
__a = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=a , )
__a = True if config.feat_extract_norm == "layer" else False
__a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__a = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__a = HubertForCTC(a )
else:
__a = HubertModel(a )
if is_finetuned:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__a = model[0].eval()
recursively_load_weights(a , a , a )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Any = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
SCREAMING_SNAKE_CASE__:List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
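# For orientation: open list i orders states by key(s, i) = g[s] + W1 * heuristics[i](s, goal),
# and an inadmissible search i is expanded only while its best key stays within W2 times the
# anchor's best key, which is the standard Multi-Heuristic A* bound (both weights are 1 here).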
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = TFViTModel(config=lowerCamelCase )
__a = model(lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase , training=lowerCamelCase )
__a = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = TFViTForImageClassification(lowerCamelCase )
__a = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = TFViTForImageClassification(lowerCamelCase )
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case : Any = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case : str = False
_snake_case : Optional[Any] = False
_snake_case : List[Any] = False
def a__ ( self ):
__a = TFViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , tf.keras.layers.Layer ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
__a = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
__a = model(**lowerCamelCase )
# verify the logits
__a = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 )
| 67 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCamelCase( a ):
__a = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__a = Stack()
__a = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a ) )
elif i in operators:
# RULE 2
operator_stack.push(a )
elif i == ")":
# RULE 4
__a = operator_stack.peek()
operator_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operators[opr](a , a )
operand_stack.push(a )
# RULE 5
return operand_stack.peek()
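# Illustrative trace (sketch, not part of the algorithm) for the sub-expression "(2 + 3)":
# '(' is skipped, 2 -> operand_stack, '+' -> operator_stack, 3 -> operand_stack; on ')'
# the code pops '+', then 3, then 2, and pushes operators['+'](2, 3) == 5 back onto operand_stack.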
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 67 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__:str = """ResNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__:List[Any] = """microsoft/resnet-50"""
SCREAMING_SNAKE_CASE__:Union[str, Any] = [1, 2048, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__:Tuple = """microsoft/resnet-50"""
SCREAMING_SNAKE_CASE__:Union[str, Any] = """tiger cat"""
SCREAMING_SNAKE_CASE__:Optional[Any] = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 3 , lowerCamelCase = 1 , lowerCamelCase = "relu" ):
super().__init__()
__a = nn.Convad(
lowerCamelCase , lowerCamelCase , kernel_size=lowerCamelCase , stride=lowerCamelCase , padding=kernel_size // 2 , bias=lowerCamelCase )
__a = nn.BatchNormad(lowerCamelCase )
__a = ACTaFN[activation] if activation is not None else nn.Identity()
def a__ ( self , lowerCamelCase ):
__a = self.convolution(lowerCamelCase )
__a = self.normalization(lowerCamelCase )
__a = self.activation(lowerCamelCase )
return hidden_state
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
__a = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__a = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__a = config.num_channels
def a__ ( self , lowerCamelCase ):
__a = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
__a = self.embedder(lowerCamelCase )
__a = self.pooler(lowerCamelCase )
return embedding
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 ):
super().__init__()
__a = nn.Convad(lowerCamelCase , lowerCamelCase , kernel_size=1 , stride=lowerCamelCase , bias=lowerCamelCase )
__a = nn.BatchNormad(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = self.convolution(lowerCamelCase )
__a = self.normalization(lowerCamelCase )
return hidden_state
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = "relu" ):
super().__init__()
__a = in_channels != out_channels or stride != 1
__a = (
ResNetShortCut(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase ) if should_apply_shortcut else nn.Identity()
)
__a = nn.Sequential(
ResNetConvLayer(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase ) , ResNetConvLayer(lowerCamelCase , lowerCamelCase , activation=lowerCamelCase ) , )
__a = ACTaFN[activation]
def a__ ( self , lowerCamelCase ):
__a = hidden_state
__a = self.layer(lowerCamelCase )
__a = self.shortcut(lowerCamelCase )
hidden_state += residual
__a = self.activation(lowerCamelCase )
return hidden_state
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = "relu" , lowerCamelCase = 4 ):
super().__init__()
__a = in_channels != out_channels or stride != 1
__a = out_channels // reduction
__a = (
ResNetShortCut(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase ) if should_apply_shortcut else nn.Identity()
)
__a = nn.Sequential(
ResNetConvLayer(lowerCamelCase , lowerCamelCase , kernel_size=1 ) , ResNetConvLayer(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase ) , ResNetConvLayer(lowerCamelCase , lowerCamelCase , kernel_size=1 , activation=lowerCamelCase ) , )
__a = ACTaFN[activation]
def a__ ( self , lowerCamelCase ):
__a = hidden_state
__a = self.layer(lowerCamelCase )
__a = self.shortcut(lowerCamelCase )
hidden_state += residual
__a = self.activation(lowerCamelCase )
return hidden_state
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 , lowerCamelCase = 2 , ):
super().__init__()
__a = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
__a = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , activation=config.hidden_act ) , *[layer(lowerCamelCase , lowerCamelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
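# Shape sketch with hypothetical sizes: given input (batch, 64, 56, 56), stride=2 and depth=3,
# the first layer downsamples to (batch, out_channels, 28, 28); the remaining depth - 1 layers
# preserve both that resolution and the channel count.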
def a__ ( self , lowerCamelCase ):
__a = input
for layer in self.layers:
__a = layer(lowerCamelCase )
return hidden_state
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
__a = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__a = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCamelCase , lowerCamelCase , lowerCamelCase , depth=lowerCamelCase ) )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True ):
__a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__a = hidden_states + (hidden_state,)
__a = stage_module(lowerCamelCase )
if output_hidden_states:
__a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCamelCase , hidden_states=lowerCamelCase , )
class snake_case__ ( snake_case_ ):
_snake_case : Union[str, Any] = ResNetConfig
_snake_case : Tuple = """resnet"""
_snake_case : Optional[Any] = """pixel_values"""
_snake_case : Union[str, Any] = True
def a__ ( self , lowerCamelCase ):
if isinstance(lowerCamelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = value
SCREAMING_SNAKE_CASE__:List[str] = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__:Tuple = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""", snake_case_, )
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = config
__a = ResNetEmbeddings(lowerCamelCase )
__a = ResNetEncoder(lowerCamelCase )
__a = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ):
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.embedder(lowerCamelCase )
__a = self.encoder(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase )
__a = encoder_outputs[0]
__a = self.pooler(lowerCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""", snake_case_, )
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = config.num_labels
__a = ResNetModel(lowerCamelCase )
# classification head
__a = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.resnet(lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier(lowerCamelCase )
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = "single_label_classification"
else:
__a = "multi_label_classification"
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a = loss_fct(lowerCamelCase , lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(lowerCamelCase , lowerCamelCase )
if not return_dict:
__a = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states )
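# Illustrative summary (sketch) of the problem_type inference above:
# num_labels == 1 -> "regression" (MSELoss on squeezed logits)
# num_labels > 1 with integer labels (torch.long / torch.int) -> "single_label_classification" (CrossEntropyLoss)
# num_labels > 1 with float multi-hot labels -> "multi_label_classification" (BCEWithLogitsLoss)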
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""", snake_case_, )
class snake_case__ ( snake_case_, snake_case_ ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
super()._init_backbone(lowerCamelCase )
__a = [config.embedding_size] + config.hidden_sizes
__a = ResNetEmbeddings(lowerCamelCase )
__a = ResNetEncoder(lowerCamelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase )
@replace_return_docstrings(output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = self.embedder(lowerCamelCase )
__a = self.encoder(lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase )
__a = outputs.hidden_states
__a = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__a = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCamelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCamelCase , )
| 67 | """simple docstring"""
from math import pi
def _lowerCamelCase( a , a ):
return 2 * pi * radius * (angle / 3_6_0)
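# Worked example matching the call below: arc_length(90, 10) == 2 * pi * 10 * (90 / 360)
# == 5 * pi ~= 15.7079, i.e. a quarter of the full circumference 2 * pi * r.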
if __name__ == "__main__":
print(arc_length(90, 10))
| 67 | 1 |
"""simple docstring"""
import re
def _lowerCamelCase( a ):
if len(re.findall("[ATCG]" , a ) ) != len(a ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
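# Illustrative example: maketrans("ATCG", "TAGC") maps A<->T and C<->G, so the function turns
# "ATGC" into "TACG", while a strand such as "ATUG" fails the [ATCG] check and raises
# ValueError("Invalid Strand").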
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
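# Worked example (hypothetical input): with size={"shortest_edge": 224} the target edge becomes
# int((256 / 224) * 224) = 256, so a 480x640 image is resized to (256, 341) -- the shorter side is
# scaled to 256 and the longer side keeps the aspect ratio before the 224x224 center crop.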
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a ):
# Initialise PyTorch model
__a = BertConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
__a = BertForPreTraining(a )
# Load weights from tf checkpoint
load_tf_weights_in_bert(a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
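# With the defaults above (image_size=30, patch_size=2): num_patches = (30 // 2) ** 2 = 225,
# so seq_length = 225 + 1 = 226 once the [CLS] token is prepended.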
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
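# Concretely for this test: facebook/dino-vits8 uses 8x8 patches, so a 480x480 input yields
# (480 // 8) ** 2 + 1 = 3601 positions, matching the (1, 3601, 384) shape asserted below.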
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
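# Worked example for the portrait branch (hypothetical input): an image with w=300, h=400 and
# size={"shortest_edge": 18} gives expected_width = 18 and expected_height = int(18 * 400 / 300) = 24.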
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
import math
def _lowerCamelCase( a ):
__a = []
__a = 2
__a = int(math.sqrt(a ) ) # Size of every segment
__a = [True] * (end + 1)
__a = []
while start <= end:
if temp[start] is True:
in_prime.append(a )
for i in range(start * start , end + 1 , a ):
__a = False
start += 1
prime += in_prime
__a = end + 1
__a = min(2 * end , a )
while low <= n:
__a = [True] * (high - low + 1)
for each in in_prime:
__a = math.floor(low / each ) * each
if t < low:
t += each
for j in range(a , high + 1 , a ):
__a = False
for j in range(len(a ) ):
if temp[j] is True:
prime.append(j + low )
__a = high + 1
__a = min(high + end , a )
return prime
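# Illustrative run (small n, sketch only): for n = 30 the base segment is [2..sqrt(30)] = [2..5],
# giving base primes [2, 3, 5]; the remaining range is then sieved in windows of width sqrt(30),
# i.e. [6..10], [11..15], ..., marking multiples of each base prime inside every window.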
print(sieve(10**6))
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[str] = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """realm"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=128 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=8 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=256 , lowerCamelCase=10 , lowerCamelCase=1E-3 , lowerCamelCase=5 , lowerCamelCase=320 , lowerCamelCase=13353718 , lowerCamelCase=5000 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , **lowerCamelCase , ):
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
# Common config
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = retriever_proj_size
__a = num_hidden_layers
__a = num_attention_heads
__a = num_candidates
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
# Reader config
__a = span_hidden_size
__a = max_span_width
__a = reader_layer_norm_eps
__a = reader_beam_size
__a = reader_seq_len
# Retrieval config
__a = num_block_records
__a = searcher_beam_size
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class snake_case__ ( snake_case_ ):
_snake_case : bool = field(default=snake_case_, metadata={"""help""": """Whether to use SortishSampler or not."""} )
_snake_case : bool = field(
default=snake_case_, metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_snake_case : Optional[int] = field(
default=snake_case_, metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
}, )
_snake_case : Optional[int] = field(
default=snake_case_, metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
}, )
_snake_case : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_, metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
}, )
def a__ ( self ):
__a = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = v.to_dict()
return d
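# Usage sketch (hypothetical values; assuming this is the usual Seq2SeqTrainingArguments class):
# args = Seq2SeqTrainingArguments(output_dir="out", generation_config=GenerationConfig(num_beams=4))
# args.to_dict()["generation_config"] would then be a plain dict like {"num_beams": 4, ...},
# since nested config objects are serialized via their own to_dict() in the loop above.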
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
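# The assertion above is why pretokenized input needs add_prefix_space=True; a
# short usage sketch with the public transformers API (downloads the gpt2 tokenizer):
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
encoding = tokenizer(["hello", "world"], is_split_into_words=True)
print(encoding.input_ids)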
"""simple docstring"""
def _lowerCamelCase( a , a ):
__a = len(a )
print("The following activities are selected:" )
# The first activity is always selected
__a = 0
print(a , end="," )
# Consider rest of the activities
for j in range(a ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(a , end="," )
__a = j
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__:str = [1, 3, 0, 5, 8, 5]
SCREAMING_SNAKE_CASE__:Union[str, Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
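# The same greedy rule as a function returning the selected indices; like the
# snippet above, it assumes the activities are already sorted by finish time.
def select_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always taken
    last = 0
    for j in range(1, len(finish)):
        # Take the next activity that starts no earlier than the last finish.
        if start[j] >= finish[last]:
            selected.append(j)
            last = j
    return selected
assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]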
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
| 67 | 1 |
"""simple docstring"""
import torch
def _lowerCamelCase( ):
if torch.cuda.is_available():
__a = torch.cuda.device_count()
else:
__a = 0
print(F"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
main()
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 67 | 1 |
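# Stripped of the bounds checks, the divide-and-conquer recursion above fits in
# a few lines: the max of a range is the larger of its two halves' maxima.
def find_max_clean(nums: list[float], left: int, right: int) -> float:
    if left == right:
        return nums[left]
    mid = (left + right) >> 1
    return max(find_max_clean(nums, left, mid), find_max_clean(nums, mid + 1, right))
assert find_max_clean([2, 8, 1, 6], 0, 3) == 8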
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a ):
if not nums:
return 0
__a = nums[0]
__a = 0
for num in nums[1:]:
__a , __a = (
max_excluding + num,
max(a , a ),
)
return max(a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
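# The rolling pair above is the classic maximum-sum-of-non-adjacent-elements
# recurrence; a readable sketch with the obfuscated names spelled out.
def max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        # Including num extends the best sum that excluded its neighbour;
        # excluding num carries forward the better of the two previous sums.
        max_including, max_excluding = max_excluding + num, max(max_including, max_excluding)
    return max(max_including, max_excluding)
assert max_non_adjacent_sum([3, 2, 7, 10]) == 13  # take 3 and 10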
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
SCREAMING_SNAKE_CASE__:int = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
SCREAMING_SNAKE_CASE__:List[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
SCREAMING_SNAKE_CASE__:List[Any] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
 predictions: list of generated text to score. Each prediction
 should be a string with tokens separated by spaces.
 references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
 pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
 device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use a GPU. If no GPU with this id is found, the CPU is used.
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def a__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="auto" , lowerCamelCase=-1 , lowerCamelCase=0.9 , lowerCamelCase=5 , lowerCamelCase=500 , lowerCamelCase="gpt2-large" , lowerCamelCase=-1 , lowerCamelCase=1024 , lowerCamelCase=25 , lowerCamelCase=5 , lowerCamelCase=True , lowerCamelCase=25 , ):
__a = compute_mauve(
p_text=lowerCamelCase , q_text=lowerCamelCase , p_features=lowerCamelCase , q_features=lowerCamelCase , p_tokens=lowerCamelCase , q_tokens=lowerCamelCase , num_buckets=lowerCamelCase , pca_max_data=lowerCamelCase , kmeans_explained_var=lowerCamelCase , kmeans_num_redo=lowerCamelCase , kmeans_max_iter=lowerCamelCase , featurize_model_name=lowerCamelCase , device_id=lowerCamelCase , max_text_length=lowerCamelCase , divergence_curve_discretization_size=lowerCamelCase , mauve_scaling_factor=lowerCamelCase , verbose=lowerCamelCase , seed=lowerCamelCase , )
return out
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
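# Both fast-tokenizer classes in this document patch add_prefix_space the same
# way: serialize the backend pre-tokenizer state, flip the flag, and rebuild.
# The pattern in isolation (the helper name is ours, not a library API):
import json
from tokenizers import pre_tokenizers
def set_add_prefix_space(tokenizer, value: bool) -> None:
    # __getstate__ on a tokenizers pre-tokenizer yields its JSON config.
    state = json.loads(tokenizer.backend_tokenizer.pre_tokenizer.__getstate__())
    if state.get("add_prefix_space") != value:
        pre_tok_class = getattr(pre_tokenizers, state.pop("type"))
        state["add_prefix_space"] = value
        tokenizer.backend_tokenizer.pre_tokenizer = pre_tok_class(**state)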
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case__ :
def __init__( self , lowerCamelCase = 6 ):
__a = None
__a = None
self.create_linked_list(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = Node()
__a = current_node
__a = current_node
__a = current_node
for _ in range(1 , lowerCamelCase ):
__a = Node()
__a = current_node
__a = previous_node
__a = current_node
__a = self.front
__a = previous_node
def a__ ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def a__ ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def a__ ( self , lowerCamelCase ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
__a = self.rear.next
if self.rear:
__a = data
def a__ ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
__a = self.front.data
__a = None
return data
__a = self.front
__a = old_front.next
__a = old_front.data
__a = None
return data
def a__ ( self ):
if self.is_empty():
raise Exception("Empty Queue" )
def a__ ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class snake_case__ :
def __init__( self ):
__a = None
__a = None
__a = None
if __name__ == "__main__":
import doctest
doctest.testmod()
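# The linked-list ring above can also be expressed as a flat list with wrapping
# indices; a minimal sketch of the same fixed-capacity FIFO semantics.
class RingBuffer:
    def __init__(self, capacity: int = 6):
        self._data = [None] * capacity
        self._head = 0  # index of the next item to dequeue
        self._size = 0
    def enqueue(self, item) -> None:
        if self._size == len(self._data):
            raise Exception("Full Queue")
        self._data[(self._head + self._size) % len(self._data)] = item
        self._size += 1
    def dequeue(self):
        if self._size == 0:
            raise Exception("Empty Queue")
        item, self._head = self._data[self._head], (self._head + 1) % len(self._data)
        self._size -= 1
        return item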
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
| 67 | 1 |
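# The ray construction in get_camera_rays above reduces to: map pixel
# coordinates to [-1, 1], scale by tan(fov / 2), and combine with the camera
# basis. A single-camera NumPy sketch of the same math (x, y, z are shape-(3,)
# basis vectors):
import numpy as np
def camera_ray_directions(width, height, x_fov, y_fov, x, y, z):
    cols, rows = np.meshgrid(np.arange(width), np.arange(height))
    fx = ((cols / (width - 1)) * 2 - 1) * np.tan(x_fov / 2)
    fy = ((rows / (height - 1)) * 2 - 1) * np.tan(y_fov / 2)
    dirs = z + fx[..., None] * x + fy[..., None] * y  # broadcasts to (H, W, 3)
    return dirs / np.linalg.norm(dirs, axis=-1, keepdims=True)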
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : List[Any] = """vision-encoder-decoder"""
_snake_case : Any = True
def __init__( self , **lowerCamelCase ):
super().__init__(**lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
__a = kwargs.pop("encoder" )
__a = encoder_config.pop("model_type" )
__a = kwargs.pop("decoder" )
__a = decoder_config.pop("model_type" )
__a = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
__a = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
__a = True
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
__a = True
__a = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.encoder.to_dict()
__a = self.decoder.to_dict()
__a = self.__class__.model_type
return output
class snake_case__ ( snake_case_ ):
_snake_case : Dict = version.parse("""1.11""" )
@property
def a__ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def a__ ( self ):
return 1E-4
@property
def a__ ( self ):
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
__a = OrderedDict()
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
__a = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
import torch
__a = OrderedDict()
__a = super().generate_dummy_inputs(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
__a , __a = dummy_input["input_ids"].shape
__a = (batch, encoder_sequence, self._config.encoder_hidden_size)
__a = dummy_input.pop("input_ids" )
__a = dummy_input.pop("attention_mask" )
__a = torch.zeros(lowerCamelCase )
return common_inputs
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
pass
def a__ ( self , lowerCamelCase ):
return VisionEncoderDecoderEncoderOnnxConfig(lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = "default" ):
__a = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowerCamelCase , lowerCamelCase )
| 67 | """simple docstring"""
def _lowerCamelCase( a ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _lowerCamelCase( a ):
__a = 0
__a = number
while duplicate > 0:
__a , __a = divmod(a , 1_0 )
fact_sum += factorial(a )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 67 | 1 |
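# A Krishnamurthy (strong) number equals the sum of its digits' factorials:
# 145 qualifies because 1! + 4! + 5! = 1 + 24 + 120 = 145. Compactly:
from math import factorial
def is_krishnamurthy(n: int) -> bool:
    return n == sum(factorial(int(d)) for d in str(n))
assert is_krishnamurthy(145)
assert not is_krishnamurthy(146)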
"""simple docstring"""
class snake_case__ :
def __init__( self ):
__a = 0
__a = 0
__a = {}
def a__ ( self , lowerCamelCase ):
if vertex not in self.adjacency:
__a = {}
self.num_vertices += 1
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
self.add_vertex(lowerCamelCase )
self.add_vertex(lowerCamelCase )
if head == tail:
return
__a = weight
__a = weight
def a__ ( self ):
__a = self.get_edges()
for edge in edges:
__a , __a , __a = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase ) ):
__a = list(edges[i] )
edges.sort(key=lambda lowerCamelCase : e[2] )
for i in range(len(lowerCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__a = edges[i][2] + 1
for edge in edges:
__a , __a , __a = edge
__a = weight
__a = weight
def __str__( self ):
__a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
__a = self.adjacency[head][tail]
string += F"{head} -> {tail} == {weight}\n"
return string.rstrip("\n" )
def a__ ( self ):
__a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self ):
return self.adjacency.keys()
@staticmethod
def a__ ( lowerCamelCase=None , lowerCamelCase=None ):
__a = Graph()
if vertices is None:
__a = []
if edges is None:
__a = []
for vertex in vertices:
g.add_vertex(lowerCamelCase )
for edge in edges:
g.add_edge(*lowerCamelCase )
return g
class snake_case__ :
def __init__( self ):
__a = {}
__a = {}
def __len__( self ):
return len(self.parent )
def a__ ( self , lowerCamelCase ):
if item in self.parent:
return self.find(lowerCamelCase )
__a = item
__a = 0
return item
def a__ ( self , lowerCamelCase ):
if item not in self.parent:
return self.make_set(lowerCamelCase )
if item != self.parent[item]:
__a = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = self.find(lowerCamelCase )
__a = self.find(lowerCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__a = roota
return roota
if self.rank[roota] < self.rank[roota]:
__a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__a = roota
return roota
return None
@staticmethod
def a__ ( lowerCamelCase ):
__a = graph.num_vertices
__a = Graph.UnionFind()
__a = []
while num_components > 1:
__a = {}
for vertex in graph.get_vertices():
__a = -1
__a = graph.get_edges()
for edge in edges:
__a , __a , __a = edge
edges.remove((tail, head, weight) )
for edge in edges:
__a , __a , __a = edge
__a = union_find.find(lowerCamelCase )
__a = union_find.find(lowerCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__a , __a , __a = cheap_edge[vertex]
if union_find.find(lowerCamelCase ) != union_find.find(lowerCamelCase ):
union_find.union(lowerCamelCase , lowerCamelCase )
mst_edges.append(cheap_edge[vertex] )
__a = num_components - 1
__a = Graph.build(edges=lowerCamelCase )
return mst
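# Boruvka's loop above leans on the union-find structure: find with path
# compression and union by rank keep component lookups near-constant.
# The core operations in isolation:
class DisjointSet:
    def __init__(self):
        self.parent = {}
        self.rank = {}
    def find(self, item):
        # Path compression: point every visited node straight at its root.
        if item not in self.parent:
            self.parent[item], self.rank[item] = item, 0
        elif item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union(self, a, b) -> None:
        # Union by rank: attach the shallower tree under the deeper one.
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return
        if self.rank[root_a] < self.rank[root_b]:
            root_a, root_b = root_b, root_a
        self.parent[root_b] = root_a
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1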
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 1 |
"""simple docstring"""
from PIL import Image
def _lowerCamelCase( a ):
__a , __a = image.size
__a = 0
__a = image.load()
for i in range(a ):
for j in range(a ):
__a = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(a ):
for i in range(a ):
__a = 2_5_5 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Dict = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 67 | """simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _lowerCamelCase( a , a , a ):
__a = OmegaConf.load(a )
__a = torch.load(a , map_location="cpu" )["model"]
__a = list(state_dict.keys() )
# extract state_dict for VQVAE
__a = {}
__a = "first_stage_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
# extract state_dict for UNetLDM
__a = {}
__a = "model.diffusion_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
__a = config.model.params.first_stage_config.params
__a = config.model.params.unet_config.params
__a = VQModel(**a ).eval()
vqvae.load_state_dict(a )
__a = UNetLDMModel(**a ).eval()
unet.load_state_dict(a )
__a = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , )
__a = LDMPipeline(a , a , a )
pipeline.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 67 | 1 |
"""simple docstring"""
from itertools import permutations
def _lowerCamelCase( a ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__a = [7, 1_1, 1_3, 1_7]
for i, test in enumerate(a ):
if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
return False
return True
def _lowerCamelCase( a = 1_0 ):
return sum(
int("".join(map(a , a ) ) )
for num in permutations(range(a ) )
if is_substring_divisible(a ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
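# The divisibility tests above implement Project Euler problem 43; the
# statement's own example, 1406357289, passes all seven checks
# (e.g. d6 d7 d8 = 572 = 11 * 52). A quick verification:
def has_substring_divisibility(digits: str) -> bool:
    primes = [2, 3, 5, 7, 11, 13, 17]
    return all(int(digits[i + 1:i + 4]) % p == 0 for i, p in enumerate(primes))
assert has_substring_divisibility("1406357289")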
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class snake_case__ ( unittest.TestCase ):
@parameterized.expand([(None,), ("foo.json",)] )
def a__ ( self , lowerCamelCase ):
__a = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase , config_name=lowerCamelCase )
__a = GenerationConfig.from_pretrained(lowerCamelCase , config_name=lowerCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowerCamelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowerCamelCase )
def a__ ( self ):
__a = AutoConfig.from_pretrained("gpt2" )
__a = GenerationConfig.from_model_config(lowerCamelCase )
__a = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def a__ ( self ):
__a = GenerationConfig()
__a = {
"max_new_tokens": 1024,
"foo": "bar",
}
__a = copy.deepcopy(lowerCamelCase )
__a = generation_config.update(**lowerCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCamelCase , lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCamelCase , {"foo": "bar"} )
def a__ ( self ):
__a = GenerationConfig()
__a = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(lowerCamelCase )
__a = GenerationConfig.from_pretrained(lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
__a = GenerationConfig.from_model_config(lowerCamelCase )
assert not hasattr(lowerCamelCase , "foo" ) # no new kwargs should be initialized if from config
def a__ ( self ):
__a = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowerCamelCase )
self.assertEqual(default_config.num_beams , 1 )
__a = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowerCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase )
__a = GenerationConfig.from_pretrained(lowerCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowerCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class snake_case__ ( unittest.TestCase ):
@classmethod
def a__ ( cls ):
__a = TOKEN
HfFolder.save_token(lowerCamelCase )
@classmethod
def a__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def a__ ( self ):
__a = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
__a = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase , repo_id="test-generation-config" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
__a = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
def a__ ( self ):
__a = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
__a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
__a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
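# The save/load round trip these tests exercise looks like this in user code:
import tempfile
from transformers import GenerationConfig
config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.do_sample is True and loaded.temperature == 0.7
assert loaded.top_k == 50  # unspecified fields keep their defaults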
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def test_headmasking( self ):
    attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    config = config_and_inputs[0]
    model = UMTaForConditionalGeneration(config ).eval()
    model.to(torch_device )
    head_masking = {
        "head_mask": torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
        "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
    }
    for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
        head_masks = {name: mask}
        # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
        if name == "head_mask":
            head_masks["decoder_head_mask"] = torch.ones(
                config.num_decoder_layers , config.num_heads , device=torch_device )
        out = model.generate(
            config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
        # We check the state of decoder_attentions and cross_attentions just from the last step
        attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
        self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def test_small_integration_test( self ):
    model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=True ).to(torch_device )
    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=False , legacy=False )
    input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
    input_ids = tokenizer(input_text , return_tensors="pt" , padding=True ).input_ids
# fmt: off
    EXPECTED_IDS = torch.tensor(
        [
            [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
        ] )
# fmt: on
    torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
    generated_ids = model.generate(input_ids.to(torch_device ) )
    EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
    filling = tokenizer.batch_decode(generated_ids )
    self.assertEqual(filling , EXPECTED_FILLING )
| 67 | 1 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class snake_case__ ( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def setUp( self ):
    super().setUp()
    # We have a SentencePiece fixture for testing
    tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
    tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id( self ):
    token = "<pad>"
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
    vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
    self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
    self.assertEqual(vocab_keys[1] , "<pad>" )
    self.assertEqual(vocab_keys[-1] , "<mask>" )
    self.assertEqual(len(vocab_keys ) , 1004 )
def test_vocab_size( self ):
    self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def test_rust_and_python_bpe_tokenizers( self ):
    tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
    tokenizer.save_pretrained(self.tmpdirname )
    rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
    sequence = "I was born in 92000, and this is falsé."
    ids = tokenizer.encode(sequence )
    rust_ids = rust_tokenizer.encode(sequence )
    self.assertListEqual(ids , rust_ids )
    ids = tokenizer.encode(sequence , add_special_tokens=False )
    rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
    self.assertListEqual(ids , rust_ids )
    # <unk> tokens are not the same for `rust` than for `slow`.
    # Because spm gives back raw token instead of `unk` in EncodeAsPieces
    # tokens = tokenizer.tokenize(sequence)
    tokens = tokenizer.convert_ids_to_tokens(ids )
    rust_tokens = rust_tokenizer.tokenize(sequence )
    self.assertListEqual(tokens , rust_tokens )
def test_rust_and_python_full_tokenizers( self ):
    if not self.test_rust_tokenizer:
        return
    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()
    sequence = "I was born in 92000, and this is falsé."
    tokens = tokenizer.tokenize(sequence )
    rust_tokens = rust_tokenizer.tokenize(sequence )
    self.assertListEqual(tokens , rust_tokens )
    ids = tokenizer.encode(sequence , add_special_tokens=False )
    rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
    self.assertListEqual(ids , rust_ids )
    rust_tokenizer = self.get_rust_tokenizer()
    ids = tokenizer.encode(sequence )
    rust_ids = rust_tokenizer.encode(sequence )
    self.assertListEqual(ids , rust_ids )
@slow
def test_tokenizer_integration( self ):
# fmt: off
__a = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
    sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=sequences , )
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
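    # Example invocation (a sketch: the script name and file paths below are
    # placeholders, only the three required flags defined above are real):
    # python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./mobilebert/model.ckpt \
    #     --mobilebert_config_file ./mobilebert/config.json \
    #     --pytorch_dump_path ./pytorch_model.bin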
| 67 | 1 |
"""simple docstring"""
import os
def solution( ):
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    numbers = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        numbers.append(numbers_from_line )
    for i in range(1 , len(numbers ) ):
        for j in range(len(numbers[i] ) ):
            number_a = numbers[i - 1][j] if j != len(numbers[i - 1] ) else 0
            number_b = numbers[i - 1][j - 1] if j > 0 else 0
            numbers[i][j] += max(number_a , number_b )
    return max(numbers[-1] )
if __name__ == "__main__":
print(solution())
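    # A quick sanity check of the same bottom-up recurrence on an in-memory
    # triangle (hypothetical data, not read from triangle.txt):
    def _max_path_sanity_check():
        rows = [[3], [7, 4], [2, 4, 6]]
        for i in range(1, len(rows)):
            for j in range(len(rows[i])):
                left = rows[i - 1][j - 1] if j > 0 else 0
                right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
                rows[i][j] += max(left, right)
        assert max(rows[-1]) == 14  # best path is 3 -> 7 -> 4
    _max_path_sanity_check()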
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( TestCase ):
    def _no_encoding_on_file_open( self , filepath ):
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self , filepath ):
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
    def test_no_print_statements( self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_lxmert_fast"""] = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_lxmert"""] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_lxmert"""] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE__:List[Any] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class snake_case__ ( PretrainedConfig ):
    model_type = """albert"""
def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
    super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    self.vocab_size = vocab_size
    self.embedding_size = embedding_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_hidden_groups = num_hidden_groups
    self.num_attention_heads = num_attention_heads
    self.inner_group_num = inner_group_num
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.classifier_dropout_prob = classifier_dropout_prob
    self.position_embedding_type = position_embedding_type
class snake_case__ ( OnnxConfig ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
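# Minimal usage sketch. The public class names are assumptions here (the two
# classes above correspond to AlbertConfig and AlbertOnnxConfig in transformers):
# config = AlbertConfig()                 # defaults match the signature above
# onnx_config = AlbertOnnxConfig(config)  # hypothetical instantiation
# list(onnx_config.inputs)                # ["input_ids", "attention_mask", "token_type_ids"]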
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue :
def __init__( self ):
__a = []
__a = set()
def minkey( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def empty( self ):
    return len(self.elements ) == 0
def put( self , item , priority ):
    if item not in self.set:
        heapq.heappush(self.elements , (priority, item) )
        self.set.add(item )
else:
# update
# print("update", item)
        temp = []
        (pri, x) = heapq.heappop(self.elements )
        while x != item:
            temp.append((pri, x) )
            (pri, x) = heapq.heappop(self.elements )
        temp.append((priority, item) )
        for pro, xxx in temp:
            heapq.heappush(self.elements , (pro, xxx) )
def remove_element( self , item ):
    if item in self.set:
        self.set.remove(item )
        temp = []
        (pro, x) = heapq.heappop(self.elements )
        while x != item:
            temp.append((pro, x) )
            (pro, x) = heapq.heappop(self.elements )
        for prito, yyy in temp:
            heapq.heappush(self.elements , (prito, yyy) )
def top_show( self ):
    return self.elements[0][1]
def get( self ):
    (priority, item) = heapq.heappop(self.elements )
    self.set.remove(item )
    return (priority, item)
def consistent_heuristic( P , goal ):
    # euclidean distance
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_a( P , goal ):
    # integer division by time variable
    return consistent_heuristic(P , goal ) // t
def heuristic_b( P , goal ):
    # manhattan distance
    return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )
def key( start , i , goal , g_function ):
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
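# For example, with Wa = 1 and g_function = {(0, 0): 0}, key((0, 0), 0, goal, g_function)
# is just the euclidean distance from (0, 0) to goal, since heuristics[0] is
# consistent_heuristic. (Illustrative values, not taken from a run.)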
def do_something( back_pointer , goal , start ):
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = "*"
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
    x = back_pointer[goal]
    while x != start:
        print(x , end=" " )
        x = back_pointer[x]
    print(x )
sys.exit()
def valid( p ):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state( s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf" )
                if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                        if neighbours not in close_list_inad:
                            for var in range(1 , n_heuristic ):
                                if key(neighbours , var , goal , g_function ) <= Wa * key(
                                    neighbours , 0 , goal , g_function ):
                                    open_list[j].put(
                                        neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground( ):
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_b}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
Wa = 1
n = 20
n_heuristic = 3 # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star( start , goal , n_heuristic ):
    g_function = {start: 0, goal: float("inf" )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf" ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf" ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf" ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , "snapshots" ) )]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None )
    prompt = (
        "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
        " field, close up, split lighting, cinematic"
    )
    prng_seed = jax.random.PRNGKey(0 )
    num_inference_steps = 4
    num_samples = jax.device_count()
    prompt = num_samples * [prompt]
    prompt_ids = pipeline.prepare_inputs(prompt )
    # shard inputs and rng
    params = replicate(params )
    prng_seed = jax.random.split(prng_seed , num_samples )
    prompt_ids = shard(prompt_ids )
    images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
    assert images.shape == (num_samples, 1, 64, 64, 3)
    if jax.device_count() == 8:
        assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.151_4745 ) < 1E-3
        assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 4_9947.875 ) < 5E-1
    images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
    assert len(images_pil ) == num_samples
def a__ ( self ):
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=None )
    prompt = (
        "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
        " field, close up, split lighting, cinematic"
    )
    prng_seed = jax.random.PRNGKey(0 )
    num_inference_steps = 50
    num_samples = jax.device_count()
    prompt = num_samples * [prompt]
    prompt_ids = pipeline.prepare_inputs(prompt )
    # shard inputs and rng
    params = replicate(params )
    prng_seed = jax.random.split(prng_seed , num_samples )
    prompt_ids = shard(prompt_ids )
    images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
    assert images.shape == (num_samples, 1, 512, 512, 3)
    if jax.device_count() == 8:
        assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0565_2401) ) < 1E-3
        assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 238_3808.2) ) < 5E-1
def a__ ( self ):
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None )
    prompt = (
        "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
        " field, close up, split lighting, cinematic"
    )
    prng_seed = jax.random.PRNGKey(0 )
    num_inference_steps = 50
    num_samples = jax.device_count()
    prompt = num_samples * [prompt]
    prompt_ids = pipeline.prepare_inputs(prompt )
    # shard inputs and rng
    params = replicate(params )
    prng_seed = jax.random.split(prng_seed , num_samples )
    prompt_ids = shard(prompt_ids )
    images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
    assert images.shape == (num_samples, 1, 512, 512, 3)
    if jax.device_count() == 8:
        assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0400_3906) ) < 1E-3
        assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 237_3516.75) ) < 5E-1
def a__ ( self ):
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 )
    prompt = (
        "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
        " field, close up, split lighting, cinematic"
    )
    prng_seed = jax.random.PRNGKey(0 )
    num_inference_steps = 50
    num_samples = jax.device_count()
    prompt = num_samples * [prompt]
    prompt_ids = pipeline.prepare_inputs(prompt )
    # shard inputs and rng
    params = replicate(params )
    prng_seed = jax.random.split(prng_seed , num_samples )
    prompt_ids = shard(prompt_ids )
    images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
    assert images.shape == (num_samples, 1, 512, 512, 3)
    if jax.device_count() == 8:
        assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0400_3906) ) < 1E-3
        assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 237_3516.75) ) < 5E-1
def a__ ( self ):
    scheduler = FlaxDDIMScheduler(
        beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=False , steps_offset=1 , )
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
    scheduler_state = scheduler.create_state()
    params["scheduler"] = scheduler_state
    prompt = (
        "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
        " field, close up, split lighting, cinematic"
    )
    prng_seed = jax.random.PRNGKey(0 )
    num_inference_steps = 50
    num_samples = jax.device_count()
    prompt = num_samples * [prompt]
    prompt_ids = pipeline.prepare_inputs(prompt )
    # shard inputs and rng
    params = replicate(params )
    prng_seed = jax.random.split(prng_seed , num_samples )
    prompt_ids = shard(prompt_ids )
    images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
    assert images.shape == (num_samples, 1, 512, 512, 3)
    if jax.device_count() == 8:
        assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0_4504_3945) ) < 1E-3
        assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 234_7693.5) ) < 5E-1
def a__ ( self ):
    prompt = (
        "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
        " field, close up, split lighting, cinematic"
    )
    num_samples = jax.device_count()
    prompt = num_samples * [prompt]
    prng_seed = jax.random.split(jax.random.PRNGKey(0 ) , num_samples )
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , )
    params = replicate(params )
    prompt_ids = pipeline.prepare_inputs(prompt )
    prompt_ids = shard(prompt_ids )
    images = pipeline(prompt_ids , params , prng_seed , jit=True ).images
    assert images.shape == (num_samples, 1, 512, 512, 3)
    image_slice = images[2, 0, 256, 10:17, 1]
    # With memory efficient attention
    pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
    params = replicate(params )
    prompt_ids = pipeline.prepare_inputs(prompt )
    prompt_ids = shard(prompt_ids )
    images_eff = pipeline(prompt_ids , params , prng_seed , jit=True ).images
    assert images_eff.shape == (num_samples, 1, 512, 512, 3)
    slice_eff = images_eff[2, 0, 256, 10:17, 1]
    # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
    # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
    assert abs(slice_eff - image_slice ).max() < 1E-2
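# A note on the replicate/shard pattern used throughout these tests: params are
# copied to every device, while prompt ids and rng keys get a leading device
# axis. For example, on 8 devices, 8 tokenized prompts of length 77 are sharded
# from shape (8, 77) to (8, 1, 77), one slice per device, before the jitted
# (pmapped) pipeline call.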
| 67 | """simple docstring"""
__author__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ):
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
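    # Worked through the four rules above: (2 + 3) -> 5, (4 * 2) -> 8,
    # (8 * 5) -> 40, then (5 + 40) -> 45, matching the expected answer.
    # Another quick check (assumed input; must be fully parenthesized with
    # single-digit operands): dijkstras_two_stack_algorithm("((9 - 3) / 2)") == 3.0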
| 67 | 1 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
SCREAMING_SNAKE_CASE__:Tuple = """base_with_context"""
def load_notes_encoder( weights , model ):
__a = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
__a = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=a )
for lyr_num, lyr in enumerate(model.encoders ):
__a = weights[F"layers_{lyr_num}"]
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__a = ly_weight["attention"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
__a = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=a )
for lyr_num, lyr in enumerate(model.encoders ):
__a = weights[F"layers_{lyr_num}"]
__a = ly_weight["attention"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
__a = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=a )
__a = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__a = weights[F"layers_{lyr_num}"]
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
__a = ly_weight["self_attention"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = ly_weight["MultiHeadDotProductAttention_0"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
SCREAMING_SNAKE_CASE__:Dict = parser.parse_args()
main(args)
| 67 | """simple docstring"""
from math import pi
def arc_length( angle , radius ):
    return 2 * pi * radius * (angle / 360 )
if __name__ == "__main__":
print(arc_length(90, 10))
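    # Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708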
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap( array , index_a , index_b , direction ):
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array , low , length , direction ):
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array , low , length , direction ):
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__:Tuple = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
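    # Note: bitonic sort assumes len(array) is a power of two. A quick
    # non-interactive check (hypothetical data, bypassing the prompt above):
    # data = [12, 42, -21, 1]
    # bitonic_sort(data, 0, len(data), 1)   # direction 1 = ascending
    # assert data == [-21, 1, 12, 42]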
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = IMAGENET_DEFAULT_MEAN , image_std = IMAGENET_DEFAULT_STD , **kwargs , ):
    super().__init__(**kwargs )
    size = size if size is not None else {"shortest_edge": 224}
    size = get_size_dict(size , default_to_square=False )
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size , param_name="crop_size" )
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
    size_dict = get_size_dict(size , default_to_square=False )
    # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
    if "shortest_edge" in size:
        shortest_edge = int((256 / 224) * size["shortest_edge"] )
        output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
        size_dict = {"height": output_size[0], "width": output_size[1]}
    if "height" not in size_dict or "width" not in size_dict:
        raise ValueError(
            F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
    return resize(
        image , size=(size_dict["height"], size_dict["width"]) , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image , size , data_format = None , **kwargs , ):
    size = get_size_dict(size )
    if "height" not in size or "width" not in size:
        raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
    return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
def rescale( self , image , scale , data_format = None , **kwargs , ):
    return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image , mean , std , data_format = None , **kwargs , ):
    return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
    do_resize = do_resize if do_resize is not None else self.do_resize
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    size = size if size is not None else self.size
    size = get_size_dict(size , default_to_square=False )
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size , param_name="crop_size" )
    images = make_list_of_images(images )
    if not valid_images(images ):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray." )
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True." )
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True." )
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True." )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True." )
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image ) for image in images]
    if do_resize:
        images = [self.resize(image , size , resample ) for image in images]
    if do_center_crop:
        images = [self.center_crop(image , crop_size ) for image in images]
    if do_rescale:
        images = [self.rescale(image , rescale_factor ) for image in images]
    if do_normalize:
        images = [self.normalize(image , image_mean , image_std ) for image in images]
    images = [to_channel_dimension_format(image , data_format ) for image in images]
    data = {"pixel_values": images}
    return BatchFeature(data=data , tensor_type=return_tensors )
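# Minimal usage sketch (assumption: this processor is the one published as
# LevitImageProcessor; the 256/224 shortest-edge rescale above is the LeViT
# convention):
# import numpy as np
# processor = LevitImageProcessor()  # hypothetical public name of the class above
# batch = processor(np.zeros((224, 224, 3), dtype=np.uint8), return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 3, 224, 224)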
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext , key ):
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext ):
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles , common_word ):
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename = "p059_cipher.txt" ):
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
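    # Why XOR decryption round-trips: XOR is self-inverse, so for any byte b and
    # key k, (b ^ k) ^ k == b. A tiny illustrative check (not part of the puzzle):
    # encrypted = [ord(c) ^ 107 for c in "the"]
    # "".join(chr(n ^ 107) for n in encrypted) == "the"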
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.scope = scope
    self.encoder_stride = encoder_stride
    # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
    num_patches = (image_size // patch_size) ** 2
    self.seq_length = num_patches + 1
def prepare_config_and_inputs( self ):
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
    config = self.get_config()
    return config, pixel_values, labels
def get_config( self ):
    return ViTConfig(
        image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def create_and_check_model( self , config , pixel_values , labels ):
    model = ViTModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
    model = ViTForMaskedImageModeling(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    self.parent.assertEqual(
        result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
    # test greyscale images
    config.num_channels = 1
    model = ViTForMaskedImageModeling(config )
    model.to(torch_device )
    model.eval()
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
    result = model(pixel_values )
    self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
    config.num_labels = self.type_sequence_label_size
    model = ViTForImageClassification(config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values , labels=labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # test greyscale images
    config.num_channels = 1
    model = ViTForImageClassification(config )
    model.to(torch_device )
    model.eval()
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
    result = model(pixel_values )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        pixel_values,
        labels,
    ) = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class snake_case__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def setUp( self ):
    self.model_tester = ViTModelTester(self )
    self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def test_config( self ):
    self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def test_model_common_attributes( self ):
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config )
        self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
        x = model.get_output_embeddings()
        self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self ):
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["pixel_values"]
        self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_image_modeling( self ):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def test_for_image_classification( self ):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
    for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = ViTModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def default_image_processor( self ):
    return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def test_inference_image_classification_head( self ):
    model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(torch_device )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs )
    # verify the logits
    expected_shape = torch.Size((1, 1000) )
    self.assertEqual(outputs.logits.shape , expected_shape )
    expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
    self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def test_inference_interpolate_pos_encoding( self ):
    # ViT models have an `interpolate_pos_encoding` argument in their forward method,
    # allowing to interpolate the pre-trained position embeddings in order to use
    # the model on higher resolutions. The DINO model by Facebook AI leverages this
    # to visualize self-attention on higher resolution images.
    model = ViTModel.from_pretrained("facebook/dino-vits8" ).to(torch_device )
    image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors="pt" )
    pixel_values = inputs.pixel_values.to(torch_device )
    # forward pass
    with torch.no_grad():
        outputs = model(pixel_values , interpolate_pos_encoding=True )
    # verify the logits
    expected_shape = torch.Size((1, 3601, 384) )
    self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
    expected_slice = torch.tensor(
        [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
    self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
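# A minimal, self-contained sketch of what the `interpolate_pos_encoding` flag
# tested above does conceptually (illustrative names, not the transformers
# implementation): resize the pre-trained patch position embeddings to a new
# grid with bicubic interpolation, leaving the [CLS] embedding untouched. For
# DINO ViT-S/8 at 480x480 this gives 60 * 60 + 1 = 3601 positions, matching
# the (1, 3601, 384) shape asserted above.
import math
import torch
import torch.nn.functional as F

def resize_position_embeddings(pos_embed, new_num_patches):
    # pos_embed: (1, 1 + old_num_patches, dim); assumes square patch grids
    cls_embed, patch_embed = pos_embed[:, :1], pos_embed[:, 1:]
    dim = pos_embed.shape[-1]
    old_size = int(math.sqrt(patch_embed.shape[1]))
    new_size = int(math.sqrt(new_num_patches))
    patch_embed = patch_embed.reshape(1, old_size, old_size, dim).permute(0, 3, 1, 2)
    patch_embed = F.interpolate(patch_embed, size=(new_size, new_size), mode="bicubic")
    patch_embed = patch_embed.permute(0, 2, 3, 1).reshape(1, new_size * new_size, dim)
    return torch.cat([cls_embed, patch_embed], dim=1)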
| 67 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _lowerCamelCase( a ):
__a = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def _lowerCamelCase( a , a ):
__a = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def _lowerCamelCase( a ):
__a = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token") )
return token
def _lowerCamelCase( ):
__a = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def _lowerCamelCase( a , a , a , a ):
__a = "imagenet-1k-id2label.json"
__a = 1_0_0_0
__a = "huggingface/label-files"
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(a , a , repo_type="dataset" ) ) , "r" ) )
    __a = {int(k ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
    __a = CvtConfig(num_labels=a , idalabel=a , labelaid=a )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
__a = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
__a = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__a = [2, 2, 2_0]
__a = [3, 1_2, 1_6]
__a = [1_9_2, 7_6_8, 1_0_2_4]
__a = CvtForImageClassification(a )
__a = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
__a = image_size
__a = torch.load(a , map_location=torch.device("cpu" ) )
__a = OrderedDict()
__a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__a = list_of_state_dict + cls_token(a )
__a = list_of_state_dict + embeddings(a )
for cnt in range(config.depth[idx] ):
__a = list_of_state_dict + attention(a , a )
__a = list_of_state_dict + final()
for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
__a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(a )
model.save_pretrained(a )
image_processor.save_pretrained(a )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
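# A minimal sketch of the remapping pattern the script above relies on
# (assumed helper name, not part of the script): given (new_key, old_key)
# pairs, copy tensors from the original checkpoint into a fresh OrderedDict
# under the HuggingFace names before load_state_dict.
def remap_state_dict(original_weights, rename_pairs):
    remapped = OrderedDict()
    for new_key, old_key in rename_pairs:
        remapped[new_key] = original_weights[old_key]
    return remapped

# toy usage with one embedding weight
_original = {"stage0.patch_embed.proj.weight": torch.zeros(64, 3, 7, 7)}
_pairs = [
    (
        "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
        "stage0.patch_embed.proj.weight",
    )
]
assert next(iter(remap_state_dict(_original, _pairs))).startswith("cvt.encoder")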
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __a = max(lowerCamelCase , key=lambda item : item[0] )[0]
            __a = max(lowerCamelCase , key=lambda item : item[1] )[1]
return expected_height, expected_width
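# A minimal sketch of the shortest-edge rule that get_expected_values above
# recomputes (illustrative name; assumes the aspect ratio is preserved): scale
# the image so its shorter side equals `shortest_edge`, rounding the longer side.
def shortest_edge_resize(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200) == (36, 18)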
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _lowerCamelCase( a , a="shi-labs/oneformer_demo" ):
with open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) as f:
        __a = json.load(f )
__a = {}
__a = []
__a = []
for key, info in class_info.items():
__a = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
            thing_ids.append(int(key ) )
__a = thing_ids
__a = class_names
return metadata
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=10 , lowerCamelCase=False , lowerCamelCase=255 , lowerCamelCase="shi-labs/oneformer_demo" , lowerCamelCase="ade20k_panoptic.json" , lowerCamelCase=10 , ):
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
__a = do_normalize
__a = image_mean
__a = image_std
__a = class_info_file
__a = prepare_metadata(lowerCamelCase , lowerCamelCase )
__a = num_text
__a = repo_path
# for the post_process_functions
__a = 2
__a = 10
__a = 10
__a = 3
__a = 4
__a = num_labels
__a = do_reduce_labels
__a = ignore_index
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __a = max(lowerCamelCase , key=lambda item : item[0] )[0]
            __a = max(lowerCamelCase , key=lambda item : item[1] )[1]
return expected_height, expected_width
def a__ ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : str = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case : Tuple = image_processing_class
def a__ ( self ):
__a = OneFormerImageProcessorTester(self )
@property
def a__ ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "ignore_index" ) )
self.assertTrue(hasattr(lowerCamelCase , "class_info_file" ) )
self.assertTrue(hasattr(lowerCamelCase , "num_text" ) )
self.assertTrue(hasattr(lowerCamelCase , "repo_path" ) )
self.assertTrue(hasattr(lowerCamelCase , "metadata" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_reduce_labels" ) )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processor(
lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processor(
lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processor(
lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase="np" ):
__a = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__a = self.image_processing_tester.num_labels
__a = None
__a = None
__a = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase )
if with_segmentation_maps:
__a = num_labels
if is_instance_map:
__a = list(range(lowerCamelCase ) ) * 2
__a = dict(enumerate(lowerCamelCase ) )
__a = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
                __a = [Image.fromarray(annotation ) for annotation in annotations]
__a = image_processor(
lowerCamelCase , ["semantic"] * len(lowerCamelCase ) , lowerCamelCase , return_tensors="pt" , instance_id_to_semantic_id=lowerCamelCase , pad_and_return_pixel_mask=lowerCamelCase , )
return inputs
def a__ ( self ):
pass
def a__ ( self ):
def common(lowerCamelCase=False , lowerCamelCase=None ):
__a = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCamelCase , is_instance_map=lowerCamelCase , segmentation_type=lowerCamelCase )
__a = inputs["mask_labels"]
__a = inputs["class_labels"]
__a = inputs["pixel_values"]
__a = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCamelCase )
common(is_instance_map=lowerCamelCase , segmentation_type="pil" )
common(is_instance_map=lowerCamelCase , segmentation_type="pil" )
def a__ ( self ):
        __a = np.zeros((20, 50) )
        __a[0, 20:] = 1
        __a[1, :15] = 1
        __a[5, :5] = 1
__a = binary_mask_to_rle(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def a__ ( self ):
__a = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
__a = self.image_processing_tester.get_fake_oneformer_outputs()
        __a = image_processor.post_process_semantic_segmentation(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__a = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __a = image_processor.post_process_semantic_segmentation(lowerCamelCase , target_sizes=lowerCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def a__ ( self ):
__a = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
__a = self.image_processing_tester.get_fake_oneformer_outputs()
__a = image_processor.post_process_instance_segmentation(lowerCamelCase , threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCamelCase )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def a__ ( self ):
__a = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
__a = self.image_processing_tester.get_fake_oneformer_outputs()
__a = image_processor.post_process_panoptic_segmentation(lowerCamelCase , threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCamelCase )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
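# A minimal sketch (illustrative name) of the argv-patching pattern
# run_and_check uses above: call a script's main() in-process with synthetic
# CLI arguments instead of spawning a subprocess.
def run_script_main(main_fn, cli_args):
    with patch.object(sys, "argv", ["script.py"] + list(cli_args)):
        return main_fn()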
| 67 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=10 , lowerCamelCase=[8, 16, 32, 64] , lowerCamelCase=[1, 1, 2, 1] , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=["stage2", "stage3", "stage4"] , lowerCamelCase=[2, 3, 4] , lowerCamelCase=1 , ):
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = embeddings_size
__a = hidden_sizes
__a = depths
__a = is_training
__a = use_labels
__a = hidden_act
__a = num_labels
__a = scope
__a = len(lowerCamelCase )
__a = out_features
__a = out_indices
__a = num_groups
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = BitModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_labels
__a = BitForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = BitBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a = None
__a = BitBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : List[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_snake_case : Union[str, Any] = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
_snake_case : Dict = False
_snake_case : Optional[Any] = False
_snake_case : Optional[Any] = False
_snake_case : Optional[int] = False
_snake_case : str = False
def a__ ( self ):
__a = BitModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def a__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ):
return
@unittest.skip(reason="Bit does not output attentions" )
def a__ ( self ):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def a__ ( self ):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(config=lowerCamelCase )
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
def a__ ( self ):
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a = layer_type
__a = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def a__ ( self ):
pass
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a = BitModel.from_pretrained(model_name )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def a__ ( self ):
__a = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : int = (BitBackbone,) if is_torch_available() else ()
_snake_case : Any = BitConfig
_snake_case : List[Any] = False
def a__ ( self ):
__a = BitModelTester(self )
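# A minimal sketch (assumed names) of the backbone contract the tests above
# check: the model produces one output per stage, then returns only the
# feature maps requested via `out_features`, with `channels` mirroring the
# widths of the selected stages.
def select_backbone_features(stage_outputs, stage_names, out_features):
    index = {name: i for i, name in enumerate(stage_names)}
    return tuple(stage_outputs[index[name]] for name in out_features)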
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
            __a = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        __a = [to_numpy_array(image ) for image in images]
        if do_resize:
            __a = [self.resize(image=image , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
        if do_center_crop:
            __a = [self.center_crop(image=image , size=lowerCamelCase ) for image in images]
        if do_rescale:
            __a = [self.rescale(image=image , scale=lowerCamelCase ) for image in images]
        if do_normalize:
            __a = [self.normalize(image=image , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
        __a = [to_channel_dimension_format(image , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
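# A minimal numpy sketch of the transform order preprocess() applies above to
# one (H, W, C) image; resizing and cropping are omitted for brevity:
# rescale -> normalize -> channels-first.
def rescale_normalize_chw(image, mean, std, scale=1 / 255):
    image = to_numpy_array(image).astype(np.float32) * scale
    image = (image - np.asarray(mean, dtype=np.float32)) / np.asarray(std, dtype=np.float32)
    return image.transpose(2, 0, 1)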
| 67 | 1 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
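# A minimal sketch of how width/depth coefficients like the ones above are
# typically applied in EfficientNet-style compound scaling (the standard
# round_filters / round_repeats recipe; assumed here, not taken from this
# conversion script).
import math

def round_filters(channels, width_coef, divisor=8):
    channels *= width_coef
    new_ch = max(divisor, int(channels + divisor / 2) // divisor * divisor)
    if new_ch < 0.9 * channels:  # never round down by more than 10%
        new_ch += divisor
    return int(new_ch)

def round_repeats(repeats, depth_coef):
    return int(math.ceil(depth_coef * repeats))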
def _lowerCamelCase( a ):
__a = EfficientNetConfig()
__a = CONFIG_MAP[model_name]["hidden_dim"]
__a = CONFIG_MAP[model_name]["width_coef"]
__a = CONFIG_MAP[model_name]["depth_coef"]
__a = CONFIG_MAP[model_name]["image_size"]
__a = CONFIG_MAP[model_name]["dropout_rate"]
__a = CONFIG_MAP[model_name]["dw_padding"]
__a = "huggingface/label-files"
__a = "imagenet-1k-id2label.json"
__a = 1_0_0_0
__a = json.load(open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) )
    __a = {int(k ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase( ):
__a = "http://images.cocodataset.org/val2017/000000039769.jpg"
__a = Image.open(requests.get(a , stream=a ).raw )
return im
def _lowerCamelCase( a ):
__a = CONFIG_MAP[model_name]["image_size"]
__a = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=a , )
return preprocessor
def _lowerCamelCase( a ):
__a = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
__a = sorted(set(a ) )
__a = len(a )
    __a = {b: str(i ) for b, i in zip(a , range(a ) )}
__a = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
__a = {}
for item in rename_keys:
if item[0] in original_param_names:
__a = "efficientnet." + item[1]
__a = "classifier.weight"
__a = "classifier.bias"
return key_mapping
def _lowerCamelCase( a , a , a ):
for key, value in tf_params.items():
if "normalization" in key:
continue
__a = key_mapping[key]
if "_conv" in key and "kernel" in key:
__a = torch.from_numpy(a ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__a = torch.from_numpy(a ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__a = torch.from_numpy(np.transpose(a ) )
else:
__a = torch.from_numpy(a )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(a )
@torch.no_grad()
def _lowerCamelCase( a , a , a , a ):
__a = model_classes[model_name](
include_top=a , weights="imagenet" , input_tensor=a , input_shape=a , pooling=a , classes=1_0_0_0 , classifier_activation="softmax" , )
__a = original_model.trainable_variables
__a = original_model.non_trainable_variables
__a = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__a = param.numpy()
__a = list(tf_params.keys() )
# Load HuggingFace model
__a = get_efficientnet_config(a )
__a = EfficientNetForImageClassification(a ).eval()
__a = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
__a = rename_keys(a )
replace_params(a , a , a )
# Initialize preprocessor and preprocess input image
__a = convert_image_processor(a )
__a = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
__a = hf_model(**a )
__a = outputs.logits.detach().numpy()
# Original model inference
__a = False
__a = CONFIG_MAP[model_name]["image_size"]
__a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__a = image.img_to_array(a )
__a = np.expand_dims(a , axis=0 )
__a = original_model.predict(a )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(a , a , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(a ):
os.mkdir(a )
# Save converted model and image processor
hf_model.save_pretrained(a )
preprocessor.save_pretrained(a )
if push_to_hub:
# Push model and image processor to hub
print(F"Pushing converted {model_name} to the hub..." )
__a = F"efficientnet-{model_name}"
preprocessor.push_to_hub(a )
hf_model.push_to_hub(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
SCREAMING_SNAKE_CASE__:int = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
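# Minimal sketch (assumed shapes, not part of the conversion script) of why
# replace_params uses permute(3, 2, 0, 1) above: TensorFlow stores conv
# kernels as (H, W, in_channels, out_channels) while torch.nn.Conv2d expects
# (out_channels, in_channels, H, W); depthwise kernels use permute(2, 3, 0, 1).
import numpy as np
import torch
tf_kernel = np.zeros((3, 3, 32, 64), dtype=np.float32)  # TF layout: (H, W, in, out)
assert tuple(torch.from_numpy(tf_kernel).permute(3, 2, 0, 1).shape) == (64, 32, 3, 3)
dw_kernel = np.zeros((3, 3, 32, 1), dtype=np.float32)  # TF depthwise: (H, W, in, mult)
assert tuple(torch.from_numpy(dw_kernel).permute(2, 3, 0, 1).shape) == (32, 1, 3, 3)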
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
import argparse
SCREAMING_SNAKE_CASE__:Dict = """docs/source/_static/js/custom.js"""
def _lowerCamelCase( a ):
with open(a , encoding="utf-8" , newline="\n" ) as f:
__a = f.readlines()
__a = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
__a = F"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += F" \"v{version}\": \"v{version}\",\n"
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Dict = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
update_custom_js(args.version)
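# Sketch of the edit update_custom_js performs for a hypothetical release
# "4.28.0": the stable-version constant is rewritten and a mapping entry is
# appended just before the closing brace of versionMapping:
#   const stableVersion = "v4.28.0"
#   const versionMapping = {
#       ...
#       "v4.28.0": "v4.28.0",
#   }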
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a = 3 , a = 7 , a = 1_0_0_0_0_0_0 ):
__a = 0
__a = 1
for current_denominator in range(1 , limit + 1 ):
__a = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__a = current_numerator
__a = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
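# Worked example with a hypothetical small limit: among fractions n/d with
# d <= 8, the largest one strictly left of 3/7 is 2/5, so the search above
# returns its numerator, 2.
assert solution(numerator=3, denominator=7, limit=8) == 2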
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
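# Quick usage sketch (sample data; the recursive calls above suggest the
# public name is find_max): search the whole list with left=0 and
# right=len(nums) - 1.
sample_nums = [3, 1, 4, 1, 5, 9, 2, 6]
assert find_max(sample_nums, 0, len(sample_nums) - 1) == 9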
| 67 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = """ClapFeatureExtractor"""
_snake_case : Any = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , lowerCamelCase , lowerCamelCase ):
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ):
__a = kwargs.pop("sampling_rate" , lowerCamelCase )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
__a = self.tokenizer(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if audios is not None:
__a = self.feature_extractor(
lowerCamelCase , sampling_rate=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is not None and audios is not None:
__a = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase ) , tensor_type=lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def a__ ( self ):
__a = self.tokenizer.model_input_names
__a = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
SCREAMING_SNAKE_CASE__:Union[str, Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
SCREAMING_SNAKE_CASE__:List[Any] = typing.Union[np.floataa, int, float] # noqa: UP007
def _lowerCamelCase( a , a ):
return np.sqrt(np.sum((np.asarray(a ) - np.asarray(a )) ** 2 ) )
def _lowerCamelCase( a , a ):
return sum((va - vb) ** 2 for va, vb in zip(a , a ) ) ** (1 / 2)
if __name__ == "__main__":
def _lowerCamelCase( ):
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
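# Sanity check for both implementations above (names taken from the timeit
# strings): (0, 0) to (3, 4) is the classic 3-4-5 triangle, so both should
# return 5.0.
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0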
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : int = CLIPConfig
_snake_case : Dict = ["""CLIPEncoderLayer"""]
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = CLIPVisionModelWithProjection(config.vision_config )
__a = nn.Linear(config.vision_config.projection_dim , 1 )
__a = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=0.5 , lowerCamelCase=0.5 ):
__a = self.vision_model(lowerCamelCase )[0]
__a = self.p_head(lowerCamelCase )
__a = nsfw_detected.flatten()
__a = nsfw_detected > p_threshold
__a = nsfw_detected.tolist()
if any(lowerCamelCase ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(lowerCamelCase ):
if nsfw_detected_:
__a = np.zeros(images[idx].shape )
__a = self.w_head(lowerCamelCase )
__a = watermark_detected.flatten()
__a = watermark_detected > w_threshold
__a = watermark_detected.tolist()
if any(lowerCamelCase ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(lowerCamelCase ):
if watermark_detected_:
__a = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
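# Standalone sketch of the orbit-frame math in the loop above: z looks from
# a ring of positions toward the origin, x is a horizontal tangent, and
# y = cross(z, x) completes a right-handed orthogonal basis.
theta_demo = 0.3  # an arbitrary sample angle
z_demo = np.array([np.sin(theta_demo), np.cos(theta_demo), -0.5])
z_demo /= np.sqrt(np.sum(z_demo**2))
x_demo = np.array([np.cos(theta_demo), -np.sin(theta_demo), 0.0])
y_demo = np.cross(z_demo, x_demo)
assert abs(np.dot(z_demo, x_demo)) < 1e-8 and abs(np.dot(z_demo, y_demo)) < 1e-8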
| 67 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : int = RobertaTokenizer
_snake_case : Optional[int] = RobertaTokenizerFast
_snake_case : Optional[int] = True
_snake_case : Optional[Any] = {"""cls_token""": """<s>"""}
def a__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__a = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__a = {"unk_token": "<unk>"}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase ) )
def a__ ( self , **lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = "lower newer"
__a = "lower newer"
return input_text, output_text
def a__ ( self ):
__a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a = "lower newer"
__a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
__a = tokenizer.tokenize(lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__a = tokens + [tokenizer.unk_token]
__a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def a__ ( self ):
__a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCamelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCamelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def a__ ( self ):
__a = self.tokenizer_class.from_pretrained("roberta-base" )
__a = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def a__ ( self ):
__a = self.get_tokenizer()
__a = "Encode this sequence."
__a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
# Testing spaces after special tokens
__a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase )} ) # mask token has a left space
__a = tokenizer.convert_tokens_to_ids(lowerCamelCase )
__a = "Encode <mask> sequence"
__a = "Encode <mask>sequence"
__a = tokenizer.encode(lowerCamelCase )
__a = encoded.index(lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase )
__a = encoded.index(lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = "A, <mask> AllenNLP sentence."
__a = tokenizer_r.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
__a = tokenizer_p.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def a__ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCamelCase )
def a__ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
__a = F"{text_of_1_token} {text_of_1_token}"
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ) + 1, 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
| 67 | """simple docstring"""
def _lowerCamelCase( a ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _lowerCamelCase( a ):
__a = 0
__a = number
while duplicate > 0:
__a , __a = divmod(a , 1_0 )
fact_sum += factorial(a )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
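# Worked example (the print above shows the checker is named krishnamurthy):
# 145 qualifies since 1! + 4! + 5! = 1 + 24 + 120 = 145, while 144 does not
# (1 + 24 + 24 = 49).
assert krishnamurthy(145)
assert not krishnamurthy(144)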
| 67 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = CanineTokenizer
_snake_case : Any = False
def a__ ( self ):
super().setUp()
__a = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def a__ ( self , **lowerCamelCase ):
__a = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
__a = 1024
return tokenizer
@require_torch
def a__ ( self ):
__a = self.canine_tokenizer
__a = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__a = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
__a = tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def a__ ( self ):
__a = self.canine_tokenizer
__a = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__a = tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , lowerCamelCase )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertIn("token_type_ids" , lowerCamelCase )
@require_torch
def a__ ( self ):
__a = self.canine_tokenizer
__a = [
"What's the weater?",
"It's about 25 degrees.",
]
__a = tokenizer(
text_target=lowerCamelCase , max_length=32 , padding="max_length" , truncation=lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def a__ ( self ):
# safety check on max_len default value so we are sure the test works
__a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__a = tempfile.mkdtemp()
__a = " He is very happy, UNwant\u00E9d,running"
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
__a = tokenizer.__class__.from_pretrained(lowerCamelCase )
__a = after_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
shutil.rmtree(lowerCamelCase )
__a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__a = tempfile.mkdtemp()
__a = " He is very happy, UNwant\u00E9d,running"
__a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__a = chr(0Xe_0_0_7 )
additional_special_tokens.append(lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
__a = tokenizer.__class__.from_pretrained(lowerCamelCase )
__a = after_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertIn(lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__a = tokenizer.__class__.from_pretrained(lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase )
def a__ ( self ):
__a = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__a , __a = self.get_clean_sequence(lowerCamelCase )
# a special token for Canine can be defined as follows:
__a = 0Xe_0_0_5
__a = chr(lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 1 )
__a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase , input_encoded + special_token_id )
__a = tokenizer.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
self.assertTrue(special_token not in decoded )
def a__ ( self ):
__a = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__a = chr(0Xe_0_0_5 )
__a = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__a = tokenizer.tokenize(lowerCamelCase )
__a = tokenizer.tokenize(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , lowerCamelCase )
self.assertEqual(token_a[0] , lowerCamelCase )
@require_tokenizers
def a__ ( self ):
__a = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__a = 0Xe_0_0_6
__a = chr(lowerCamelCase )
__a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCamelCase )
tokenizer.from_pretrained(lowerCamelCase )
def a__ ( self ):
__a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase )
with open(os.path.join(lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__a = json.load(lowerCamelCase )
with open(os.path.join(lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__a = json.load(lowerCamelCase )
# a special token for Canine can be defined as follows:
__a = 0Xe_0_0_6
__a = chr(lowerCamelCase )
__a = [new_token_a]
__a = [new_token_a]
with open(os.path.join(lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase , lowerCamelCase )
with open(os.path.join(lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase , lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__a = tokenizer_class.from_pretrained(lowerCamelCase , extra_ids=0 )
self.assertIn(lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__a = 0Xe_0_0_7
__a = chr(lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__a = [AddedToken(lowerCamelCase , lstrip=lowerCamelCase )]
__a = tokenizer_class.from_pretrained(
lowerCamelCase , additional_special_tokens=lowerCamelCase , extra_ids=0 )
self.assertIn(lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def a__ ( self ):
__a = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__a = "hello world"
if self.space_between_special_tokens:
__a = "[CLS] hello world [SEP]"
else:
__a = input
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.decode(lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCamelCase , [output, output.lower()] )
def a__ ( self ):
__a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__a = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__a = "a"
__a = ord(lowerCamelCase )
for attr in attributes_list:
setattr(lowerCamelCase , attr + "_id" , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , attr + "_id" ) , lowerCamelCase )
setattr(lowerCamelCase , attr + "_id" , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , attr + "_id" ) , lowerCamelCase )
setattr(lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__a = 0Xe_0_0_6
__a = chr(lowerCamelCase )
setattr(lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
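# Usage sketch of the lazy module above: importing the package stays cheap,
# and the torch-backed classes are only materialized on first access, e.g.
#   from transformers import GPTBigCodeConfig
#   config = GPTBigCodeConfig()  # resolved through _LazyModule on demand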
| 67 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase( a , a=0.9_99 , a="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(a ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
__a = []
for i in range(a ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a ) / alpha_bar_fn(a ) , a ) )
return torch.tensor(a , dtype=torch.floataa )
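# Small usage sketch of betas_for_alpha_bar (illustrative values): with the
# default cosine transform each beta is clipped at max_beta=0.999 and the
# schedule grows toward the end of diffusion.
example_betas = betas_for_alpha_bar(1000)
assert example_betas.shape == (1000,)
assert float(example_betas.max()) <= 0.999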
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : str = [e.name for e in KarrasDiffusionSchedulers]
_snake_case : Any = 2
@register_to_config
def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = 0.0_0085 , lowerCamelCase = 0.012 , lowerCamelCase = "linear" , lowerCamelCase = None , lowerCamelCase = "epsilon" , lowerCamelCase = "linspace" , lowerCamelCase = 0 , ):
if trained_betas is not None:
__a = torch.tensor(lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(lowerCamelCase , lowerCamelCase , lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(lowerCamelCase )
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(lowerCamelCase ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def a__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = self.index_for_timestep(lowerCamelCase )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase , dtype=lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(lowerCamelCase , 0 , -step_ratio )).round().copy().astype(lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(lowerCamelCase ) ).to(lowerCamelCase )
__a = np.interp(lowerCamelCase , np.arange(0 , len(lowerCamelCase ) ) , lowerCamelCase )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase ).startswith("mps" ):
# mps does not support float64
__a = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase , dtype=torch.floataa )
else:
__a = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase )
# interpolate timesteps
__a = self.sigma_to_t(lowerCamelCase ).to(lowerCamelCase , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def a__ ( self ):
return self.sample is None
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ):
__a = self.index_for_timestep(lowerCamelCase )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(lowerCamelCase , lowerCamelCase ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
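# A minimal, self-contained sketch of the piecewise log-linear sigma-to-timestep
# lookup implemented in sigma_to_t above. The five-entry sigma schedule is
# hypothetical and only illustrates the mechanics; a real scheduler derives its
# log_sigmas from alphas_cumprod.
import torch

log_sigmas = torch.tensor([0.03, 1.0, 5.0, 10.0, 14.6]).log()  # increasing schedule
log_sigma = torch.tensor(3.0).log()  # query sigma
low_idx = (log_sigma - log_sigmas).ge(0).cumsum(0).argmax(0).clamp(max=log_sigmas.numel() - 2)
high_idx = low_idx + 1
low, high = log_sigmas[low_idx], log_sigmas[high_idx]
w = ((low - log_sigma) / (low - high)).clamp(0, 1)
t = (1 - w) * low_idx + w * high_idx
print(float(t))  # ~1.68: sigma=3 falls between schedule entries 1 and 2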
| 67 | """simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
config = OmegaConf.load(config_path )
state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
keys = list(state_dict.keys() )
# extract state_dict for VQVAE
first_stage_dict = {}
first_stage_key = "first_stage_model."
for key in keys:
if key.startswith(first_stage_key ):
first_stage_dict[key.replace(first_stage_key , "" )] = state_dict[key]
# extract state_dict for UNetLDM
unet_state_dict = {}
unet_key = "model.diffusion_model."
for key in keys:
if key.startswith(unet_key ):
unet_state_dict[key.replace(unet_key , "" )] = state_dict[key]
vqvae_init_args = config.model.params.first_stage_config.params
unet_init_args = config.model.params.unet_config.params
vqvae = VQModel(**vqvae_init_args ).eval()
vqvae.load_state_dict(first_stage_dict )
unet = UNetLDMModel(**unet_init_args ).eval()
unet.load_state_dict(unet_state_dict )
scheduler = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
pipeline = LDMPipeline(vqvae , unet , scheduler )
pipeline.save_pretrained(output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
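# Hedged usage sketch: the paths are placeholders and the script filename is an
# assumption, not taken from the repository.
#
#   python convert_ldm_original_checkpoint.py \
#       --checkpoint_path /path/to/model.ckpt \
#       --config_path /path/to/config.yaml \
#       --output_path ./ldm-pipeline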
| 67 | 1 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( snake_case_ ):
_snake_case : Any = (EulerDiscreteScheduler,)
_snake_case : Tuple = 10
def a__ ( self , **lowerCamelCase ):
__a = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCamelCase )
return config
def a__ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def a__ ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def a__ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def a__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def a__ ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
__a = model(lowerCamelCase , lowerCamelCase )
__a = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
__a = output.prev_sample
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__ ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type="v_prediction" )
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
__a = model(lowerCamelCase , lowerCamelCase )
__a = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
__a = output.prev_sample
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def a__ ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
__a = model(lowerCamelCase , lowerCamelCase )
__a = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
__a = output.prev_sample
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__ ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
__a = model(lowerCamelCase , lowerCamelCase )
__a = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
__a = output.prev_sample
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
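# A minimal sketch of the first-order update the scheduler under test applies
# at each step (epsilon prediction, zero churn): the sample moves along the ODE
# derivative by the difference between consecutive sigmas. All values here are
# arbitrary.
import torch

sample = torch.randn(1, 4)
model_output = torch.randn(1, 4)  # the model's predicted noise
sigma, sigma_next = 14.6, 9.8  # two consecutive schedule values
pred_original_sample = sample - sigma * model_output
derivative = (sample - pred_original_sample) / sigma
prev_sample = sample + derivative * (sigma_next - sigma)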
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
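# A hedged re-implementation of transformers' compute_effective_axis_dimension
# helper used above, to make the dummy-input sizing explicit: a dynamic axis
# (-1) is replaced by a fixed fallback, and the number of special tokens the
# tokenizer will add is subtracted so the final length comes out right.
def effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # dynamic axis
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert effective_axis_dimension(-1, fixed_dimension=2) == 2  # dynamic batch -> 2 samples
assert effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6  # dynamic sequence
assert effective_axis_dimension(16, fixed_dimension=8) == 16  # static axes pass through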
| 67 | 1 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class snake_case__ ( nn.Module ):
def __init__( self ):
super().__init__()
__a = nn.Linear(3 , 4 )
__a = nn.BatchNormad(4 )
__a = nn.Linear(4 , 5 )
def a__ ( self , lowerCamelCase ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase ) ) )
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ):
return (args[0] + 1,) + args[1:], kwargs
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase , lowerCamelCase ):
return output + 1
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
__a = ModelForTest()
__a = ModelHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
self.assertEqual(test_model._hf_hook , lowerCamelCase )
self.assertTrue(hasattr(lowerCamelCase , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(lowerCamelCase )
self.assertFalse(hasattr(lowerCamelCase , "_hf_hook" ) )
self.assertFalse(hasattr(lowerCamelCase , "_old_forward" ) )
def a__ ( self ):
__a = ModelForTest()
__a = ModelHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
add_hook_to_module(lowerCamelCase , lowerCamelCase , append=lowerCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , lowerCamelCase ) , lowerCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowerCamelCase , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(lowerCamelCase )
self.assertFalse(hasattr(lowerCamelCase , "_hf_hook" ) )
self.assertFalse(hasattr(lowerCamelCase , "_old_forward" ) )
def a__ ( self ):
__a = ModelForTest()
__a = torch.randn(2 , 3 )
__a = test_model(x + 1 )
__a = test_model(x + 2 )
__a = PreForwardHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__a = PreForwardHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 )
def a__ ( self ):
__a = ModelForTest()
__a = torch.randn(2 , 3 )
__a = test_model(lowerCamelCase )
__a = PostForwardHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__a = PostForwardHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
assert torch.allclose(lowerCamelCase , output + 2 , atol=1E-5 )
def a__ ( self ):
__a = ModelForTest()
__a = torch.randn(2 , 3 )
__a = test_model(lowerCamelCase )
__a = PostForwardHook()
add_hook_to_module(lowerCamelCase , lowerCamelCase )
__a = test_model(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__a = True
__a = test_model(lowerCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self ):
__a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCamelCase , AlignDevicesHook(io_same_device=lowerCamelCase ) )
__a = torch.randn(2 , 3 ).to(0 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self ):
__a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
__a = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
__a = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase )
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
__a = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a__ ( self ):
__a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
__a = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(lowerCamelCase , execution_device=lowerCamelCase , offload=lowerCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
__a = torch.device(lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase )
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCamelCase , execution_device=lowerCamelCase , offload=lowerCamelCase , offload_buffers=lowerCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a__ ( self ):
__a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
__a = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
lowerCamelCase , execution_device=lowerCamelCase , offload=lowerCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
__a = torch.device(lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase )
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCamelCase , execution_device=lowerCamelCase , offload=lowerCamelCase , weights_map=model.state_dict() , offload_buffers=lowerCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
__a = torch.randn(2 , 3 )
__a = model(lowerCamelCase )
self.assertEqual(output.device , lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
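# A minimal sketch of the hook mechanism exercised above, assuming accelerate's
# public ModelHook API: pre_forward may rewrite (args, kwargs) before the
# wrapped forward runs, and post_forward may rewrite the output afterwards.
# ScaleOutputHook is a hypothetical example, not part of accelerate.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module

class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        return output * 2

layer = nn.Linear(3, 3)
add_hook_to_module(layer, ScaleOutputHook())
print(layer(torch.randn(1, 3)))  # twice the plain Linear output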
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
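# A condensed, self-contained version of the cache-consistency check asserted
# above, using a tiny randomly initialized T5 configuration (no pretrained
# download; all sizes are arbitrary): logits for the newest token must match
# whether the prefix is re-run or served from past_key_values.
import torch
from transformers import T5Config, T5ForConditionalGeneration

config = T5Config(vocab_size=64, d_model=32, d_ff=64, d_kv=8, num_layers=2, num_heads=4)
model = T5ForConditionalGeneration(config).eval()
enc = torch.randint(0, 64, (1, 5))
dec = torch.randint(0, 64, (1, 4))
with torch.no_grad():
    full = model(input_ids=enc, decoder_input_ids=dec).logits
    past = model(input_ids=enc, decoder_input_ids=dec[:, :-1], use_cache=True).past_key_values
    step = model(input_ids=enc, decoder_input_ids=dec[:, -1:], past_key_values=past).logits
assert torch.allclose(full[:, -1], step[:, 0], atol=1e-4)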
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
SCREAMING_SNAKE_CASE__:Optional[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
SCREAMING_SNAKE_CASE__:int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class snake_case__ ( snake_case_ ):
_snake_case : Union[str, Any] = """whisper"""
_snake_case : int = ["""past_key_values"""]
_snake_case : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=51865 , lowerCamelCase=80 , lowerCamelCase=6 , lowerCamelCase=4 , lowerCamelCase=6 , lowerCamelCase=4 , lowerCamelCase=1536 , lowerCamelCase=1536 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=50257 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=256 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=False , lowerCamelCase=1500 , lowerCamelCase=448 , lowerCamelCase=50256 , lowerCamelCase=50256 , lowerCamelCase=50256 , lowerCamelCase=None , lowerCamelCase=[220, 50256] , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=False , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=7 , **lowerCamelCase , ):
__a = vocab_size
__a = num_mel_bins
__a = d_model
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_layers
__a = decoder_attention_heads
__a = decoder_ffn_dim
__a = encoder_ffn_dim
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
__a = max_source_positions
__a = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__a = classifier_proj_size
__a = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
__a = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , suppress_tokens=lowerCamelCase , begin_suppress_tokens=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
__a = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = 22050 , lowerCamelCase = 5.0 , lowerCamelCase = 220 , ):
__a = OrderedDict()
__a = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase , framework=lowerCamelCase , sampling_rate=lowerCamelCase , time_duration=lowerCamelCase , frequency=lowerCamelCase , )
__a = encoder_inputs["input_features"].shape[2]
__a = encoder_sequence_length // 2 if self.use_past else seq_length
__a = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = encoder_inputs.pop("input_features" )
__a = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
__a = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def a__ ( self ):
return 1E-3
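# A small sketch of what the suppress-token lists above are used for: during
# generation the corresponding logits are forced to -inf so those ids can never
# be sampled. The vocabulary size and ids here are toy values.
import torch

logits = torch.randn(1, 16)
suppress = [1, 2, 7]
logits[:, suppress] = -float("inf")
next_id = int(logits.argmax(dim=-1))  # guaranteed not to be a suppressed id
assert next_id not in suppress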
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
# Initialise PyTorch model
config = MobileBertConfig.from_json_file(mobilebert_config_file )
print(F"Building PyTorch model from configuration: {config}" )
model = MobileBertForPreTraining(config )
# Load weights from tf checkpoint
model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
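# A runnable round-trip sketch of the final save step above, using a small
# hypothetical configuration so no TensorFlow checkpoint is needed: a
# state_dict written with torch.save loads cleanly back into a freshly built
# model.
import os
import tempfile
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining

tiny = MobileBertForPreTraining(MobileBertConfig(num_hidden_layers=2))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "pytorch_model.bin")
    torch.save(tiny.state_dict(), path)
    tiny.load_state_dict(torch.load(path))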
| 67 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
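# A numeric sketch of the pixel-to-ray mapping in get_camera_rays above: pixel
# coordinates are rescaled to [-1, 1] and then onto the tangent plane via
# tan(fov / 2). The 4x4 image and 0.7-radian fov are arbitrary.
import numpy as np

width = height = 4
x_fov = y_fov = 0.7
px = np.array([0.0, 3.0])  # a corner pixel (x, y)
fracs = (px / (np.array([width, height]) - 1)) * 2 - 1  # -> [-1, 1]
fracs = fracs * np.tan(np.array([x_fov, y_fov]) / 2)  # tangent-plane offsets
print(fracs)  # offsets applied along the camera x and y axes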
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class snake_case__ :
# setable values
_snake_case : Optional[int] = None
_snake_case : Optional[jnp.ndarray] = None
_snake_case : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def a__ ( cls ):
return cls()
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : jnp.ndarray
_snake_case : jnp.ndarray
_snake_case : KarrasVeSchedulerState
class snake_case__ ( snake_case_, snake_case_ ):
@property
def a__ ( self ):
return True
@register_to_config
def __init__( self , lowerCamelCase = 0.02 , lowerCamelCase = 100 , lowerCamelCase = 1.007 , lowerCamelCase = 80 , lowerCamelCase = 0.05 , lowerCamelCase = 50 , ):
pass
def a__ ( self ):
return KarrasVeSchedulerState.create()
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = () ):
__a = jnp.arange(0 , lowerCamelCase )[::-1].copy()
__a = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCamelCase , schedule=jnp.array(lowerCamelCase , dtype=jnp.floataa ) , timesteps=lowerCamelCase , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
if self.config.s_min <= sigma <= self.config.s_max:
__a = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
__a = 0
# sample eps ~ N(0, S_noise^2 * I)
__a = random.split(lowerCamelCase , num=1 )
__a = self.config.s_noise * random.normal(key=lowerCamelCase , shape=sample.shape )
__a = sigma + gamma * sigma
__a = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ):
__a = sample_hat + sigma_hat * model_output
__a = (sample_hat - pred_original_sample) / sigma_hat
__a = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCamelCase , derivative=lowerCamelCase , state=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ):
__a = sample_prev + sigma_prev * model_output
__a = (sample_prev - pred_original_sample) / sigma_prev
__a = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCamelCase , derivative=lowerCamelCase , state=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
raise NotImplementedError()
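# A standalone evaluation of the geometric sigma schedule constructed above,
# with illustrative sigma_min/sigma_max values: the entries interpolate between
# sigma_max**2 and sigma_min**2 in log space.
import jax.numpy as jnp

sigma_min, sigma_max, n = 0.02, 100.0, 5
schedule = jnp.array(
    [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1)) for i in range(n)]
)
print(schedule)  # from 1e4 down to 4e-4, geometrically spaced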
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _lowerCamelCase( a = 8 ):
__a = ascii_letters + digits + punctuation
return "".join(secrets.choice(a ) for _ in range(a ) )
def _lowerCamelCase( a , a ):
    # Alternative password generator: combine the required characters (chars_incl)
    # with output from the random_letters, random_number and random_characters helpers
# Put your code here...
i -= len(a )
__a = i // 3
__a = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
__a = (
chars_incl
+ random(a , quotient + remainder )
+ random(a , a )
+ random(a , a )
)
__a = list(a )
shuffle(a )
return "".join(a )
# random is a generalised function for letters, characters and numbers
def _lowerCamelCase( a , a ):
return "".join(secrets.choice(a ) for _ in range(a ) )
def _lowerCamelCase( a , a ):
pass # Put your code here...
def _lowerCamelCase( a , a ):
pass # Put your code here...
def _lowerCamelCase( a , a ):
pass # Put your code here...
def _lowerCamelCase( a , a = 8 ):
if len(a ) < min_length:
# Your Password must be at least 8 characters long
return False
__a = any(char in ascii_uppercase for char in password )
__a = any(char in ascii_lowercase for char in password )
__a = any(char in digits for char in password )
__a = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def _lowerCamelCase( ):
__a = int(input("Please indicate the max length of your password: " ).strip() )
__a = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(a ) )
print(
"Alternative Password generated:" , alternative_password_generator(a , a ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
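The strength rule encoded above (minimum length plus at least one uppercase letter, lowercase letter, digit and special character) can be exercised in isolation; a minimal re-implementation sketch with illustrative names:

from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def looks_strong(password: str, min_length: int = 8) -> bool:
    # mirrors the four any(...) checks and the length guard above
    return (
        len(password) >= min_length
        and any(c in ascii_uppercase for c in password)
        and any(c in ascii_lowercase for c in password)
        and any(c in digits for c in password)
        and any(c in punctuation for c in password)
    )

assert looks_strong("Hunter2!x")
assert not looks_strong("password")   # no uppercase, digit or special character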
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
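The loop above is the multi-heuristic A* (MHA*) pattern: queue 0 is the "anchor", ordered by the consistent heuristic, and an inadmissible queue i is only expanded while its best key stays within a factor W2 of the anchor's. A standalone sketch of the two rules (W1 and W2 follow the hyperparameters above; the function names are illustrative):

W1, W2 = 1.0, 1.0   # the weights set in the hyper-parameter section above

def mha_key(g_score: float, h_value: float) -> float:
    # key(s, i) = g(s) + W1 * h_i(s), as in the key() function above
    return g_score + W1 * h_value

def inadmissible_eligible(min_inad_key: float, min_anchor_key: float) -> bool:
    # expansion test from the main loop: open_list[i] may expand while its
    # minimum key is within a factor W2 of the anchor queue's minimum key
    return min_inad_key <= W2 * min_anchor_key

assert inadmissible_eligible(4.0, 5.0) and not inadmissible_eligible(6.0, 5.0)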
| 67 | 1 |
"""simple docstring"""
from math import isqrt
def _lowerCamelCase( a ):
    return all(a % divisor != 0 for divisor in range(2 , isqrt(a ) + 1 ) )
def _lowerCamelCase( a = 1_0**6 ):
__a = 0
__a = 1
__a = 7
while prime_candidate < max_prime:
primes_count += is_prime(a )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
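The candidate update above works because the solution counts primes that are differences of consecutive cubes: (n + 1)**3 - n**3 = 3n^2 + 3n + 1, and consecutive candidates differ by 6(n + 1), which is exactly the prime_candidate += 6 * cube_index step. A quick verification of both identities:

# Differences of consecutive cubes, starting 7, 19, 37, 61, ...
cands = [(n + 1) ** 3 - n**3 for n in range(1, 100)]
assert cands[:4] == [7, 19, 37, 61]
for n in range(1, 99):
    assert (n + 1) ** 3 - n**3 == 3 * n * n + 3 * n + 1
    # step between consecutive candidates is 6 * (n + 1)
    assert cands[n] - cands[n - 1] == 6 * (n + 1)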
| 67 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCamelCase( a ):
__a = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__a = Stack()
__a = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a ) )
elif i in operators:
# RULE 2
operator_stack.push(a )
elif i == ")":
# RULE 4
__a = operator_stack.peek()
operator_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operators[opr](a , a )
operand_stack.push(a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
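A worked trace of rules 1-5 on the sample equation, showing how the two stacks resolve each closing parenthesis (note the scanner is character-based, so only single-digit operands are supported):

# Worked trace for "(5 + ((4 * 2) * (2 + 3)))":
#   push 5; push '+'; push 4; push '*'; push 2
#   1st ')': pop '*' -> 4 * 2 = 8        operands [5, 8]
#   push '*'; push 2; push '+'; push 3
#   2nd ')': pop '+' -> 2 + 3 = 5        operands [5, 8, 5]
#   3rd ')': pop '*' -> 8 * 5 = 40       operands [5, 40]
#   4th ')': pop '+' -> 5 + 40 = 45      operands [45]; peek() returns 45
assert (5 + ((4 * 2) * (2 + 3))) == 45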
| 67 | 1 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = """align_text_model"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=0 , lowerCamelCase="absolute" , lowerCamelCase=True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = pad_token_id
@classmethod
def a__ ( cls , lowerCamelCase , **lowerCamelCase ):
cls._set_token_in_kwargs(lowerCamelCase )
__a , __a = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
__a = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class snake_case__ ( snake_case_ ):
_snake_case : Dict = """align_vision_model"""
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 600 , lowerCamelCase = 2.0 , lowerCamelCase = 3.1 , lowerCamelCase = 8 , lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase = [] , lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase = 0.25 , lowerCamelCase = "swish" , lowerCamelCase = 2560 , lowerCamelCase = "mean" , lowerCamelCase = 0.02 , lowerCamelCase = 0.001 , lowerCamelCase = 0.99 , lowerCamelCase = 0.2 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = num_channels
__a = image_size
__a = width_coefficient
__a = depth_coefficient
__a = depth_divisor
__a = kernel_sizes
__a = in_channels
__a = out_channels
__a = depthwise_padding
__a = strides
__a = num_block_repeats
__a = expand_ratios
__a = squeeze_expansion_ratio
__a = hidden_act
__a = hidden_dim
__a = pooling_type
__a = initializer_range
__a = batch_norm_eps
__a = batch_norm_momentum
__a = drop_connect_rate
__a = sum(lowerCamelCase ) * 4
@classmethod
def a__ ( cls , lowerCamelCase , **lowerCamelCase ):
cls._set_token_in_kwargs(lowerCamelCase )
__a , __a = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
__a = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class snake_case__ ( snake_case_ ):
_snake_case : str = """align"""
_snake_case : List[Any] = True
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=640 , lowerCamelCase=1.0 , lowerCamelCase=0.02 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if text_config is None:
__a = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
__a = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
__a = AlignTextConfig(**lowerCamelCase )
__a = AlignVisionConfig(**lowerCamelCase )
__a = projection_dim
__a = temperature_init_value
__a = initializer_range
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase )
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.text_config.to_dict()
__a = self.vision_config.to_dict()
__a = self.__class__.model_type
return output
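A hypothetical construction sketch for the composite configuration above, using the public AlignTextConfig / AlignVisionConfig / AlignConfig names implied by the comments and the instantiation from the text_config/vision_config dicts (the keyword names here are assumptions, not taken from this listing):

# Illustrative only: build the two sub-configs, then combine them the same way
# the final classmethod does, via their to_dict() payloads.
text_cfg = AlignTextConfig(vocab_size=30522, hidden_size=768)
vision_cfg = AlignVisionConfig(num_channels=3, image_size=600)
combined = AlignConfig(
    text_config=text_cfg.to_dict(),
    vision_config=vision_cfg.to_dict(),
    projection_dim=640,   # default from the signature above
)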
| 67 | """simple docstring"""
from math import pi
def _lowerCamelCase( a , a ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
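Worked check for the call in __main__: a 90-degree arc of a circle of radius 10 is a quarter of the circumference, 2 * pi * 10 / 4 = 5 * pi.

from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)   # ~15.70796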
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a ):
# Checks if the entire collection has been sorted
if len(a ) <= 1 or n <= 1:
return
insert_next(a , n - 1 )
rec_insertion_sort(a , n - 1 )
def _lowerCamelCase( a , a ):
# Checks order between adjacent elements
if index >= len(a ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__a , __a = (
collection[index],
collection[index - 1],
)
insert_next(a , index + 1 )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:int = input("""Enter integers separated by spaces: """)
SCREAMING_SNAKE_CASE__:list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
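A quick in-place usage check of the sort above, using the rec_insertion_sort name the __main__ block calls (the list is mutated; the second argument counts the elements still to be placed):

nums = [5, 2, 9, 1]
rec_insertion_sort(nums, len(nums))
assert nums == [1, 2, 5, 9]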
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
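Numerically, the resize-then-crop path above behaves as follows for the defaults size={"shortest_edge": 224} and a 224x224 crop: the shortest edge is first rescaled to int((256 / 224) * 224), i.e. 256, with the aspect ratio preserved, and the center 224x224 region is then taken. An illustrative example (shapes hypothetical, rounding approximate):

h, w = 480, 640                                   # example input image
scale = 256 / min(h, w)                           # shortest edge -> 256
resized = (round(h * scale), round(w * scale))    # (256, 341), up to rounding
cropped = (224, 224)                              # after the center crop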
| 67 | 1 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCamelCase( a ):
__a = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__a = Stack()
__a = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a ) )
elif i in operators:
# RULE 2
operator_stack.push(a )
elif i == ")":
# RULE 4
__a = operator_stack.peek()
operator_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operators[opr](a , a )
operand_stack.push(a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
        __a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
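The sequence lengths asserted in these tests follow ViT's token count: (image_size // patch_size) ** 2 patches plus one [CLS] token. A quick sanity check:

def vit_seq_len(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1

assert vit_seq_len(480, 8) == 3601   # dino-vits8 at 480px, as asserted above
assert vit_seq_len(30, 2) == 226     # the ViTModelTester defaults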
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:int = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[str] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _lowerCamelCase( a ):
__a = 3_8_4
__a = 7
if "tiny" in model_name:
__a = 9_6
__a = (2, 2, 6, 2)
__a = (3, 6, 1_2, 2_4)
elif "small" in model_name:
__a = 9_6
__a = (2, 2, 1_8, 2)
__a = (3, 6, 1_2, 2_4)
elif "base" in model_name:
__a = 1_2_8
__a = (2, 2, 1_8, 2)
__a = (4, 8, 1_6, 3_2)
__a = 1_2
__a = 5_1_2
elif "large" in model_name:
__a = 1_9_2
__a = (2, 2, 1_8, 2)
__a = (6, 1_2, 2_4, 4_8)
__a = 1_2
__a = 7_6_8
# set label information
__a = 1_5_0
__a = "huggingface/label-files"
__a = "ade20k-id2label.json"
__a = json.load(open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) )
__a = {int(a ): v for k, v in idalabel.items()}
__a = {v: k for k, v in idalabel.items()}
__a = SwinConfig(
embed_dim=a , depths=a , num_heads=a , window_size=a , out_features=["stage1", "stage2", "stage3", "stage4"] , )
__a = UperNetConfig(
backbone_config=a , auxiliary_in_channels=a , num_labels=a , idalabel=a , labelaid=a , )
return config
def _lowerCamelCase( a ):
__a = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _lowerCamelCase( a , a , a ):
__a = dct.pop(a )
__a = val
def _lowerCamelCase( a , a ):
__a = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__a = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__a = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
__a = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__a = in_proj_weight[:dim, :]
__a = in_proj_bias[: dim]
__a = in_proj_weight[
dim : dim * 2, :
]
__a = in_proj_bias[
dim : dim * 2
]
__a = in_proj_weight[
-dim :, :
]
__a = in_proj_bias[-dim :]
# fmt: on
def _lowerCamelCase( a ):
__a , __a = x.shape
__a = x.reshape(a , 4 , in_channel // 4 )
__a = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(a , a )
return x
def _lowerCamelCase( a ):
__a , __a = x.shape
__a = x.reshape(a , in_channel // 4 , 4 )
__a = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(a , a )
return x
def _lowerCamelCase( a ):
__a = x.shape[0]
__a = x.reshape(4 , in_channel // 4 )
__a = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(a )
return x
def _lowerCamelCase( a ):
__a = x.shape[0]
__a = x.reshape(in_channel // 4 , 4 )
__a = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(a )
return x
def _lowerCamelCase( a , a , a ):
__a = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
__a = model_name_to_url[model_name]
__a = torch.hub.load_state_dict_from_url(a , map_location="cpu" , file_name=a )[
"state_dict"
]
for name, param in state_dict.items():
print(a , param.shape )
__a = get_upernet_config(a )
__a = UperNetForSemanticSegmentation(a )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__a = state_dict.pop(a )
if "bn" in key:
__a = key.replace("bn" , "batch_norm" )
__a = val
# rename keys
__a = create_rename_keys(a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__a = reverse_correct_unfold_reduction_order(a )
if "norm" in key:
__a = reverse_correct_unfold_norm_order(a )
model.load_state_dict(a )
# verify on image
__a = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__a = Image.open(requests.get(a , stream=a ).raw ).convert("RGB" )
__a = SegformerImageProcessor()
__a = processor(a , return_tensors="pt" ).pixel_values
with torch.no_grad():
__a = model(a )
__a = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__a = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
__a = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
__a = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
__a = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , a , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(a )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
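The unfold-reorder helpers above permute the four 2x2 patch-merge channel groups as [0, 2, 1, 3] and then interleave them via the transpose. A tiny numeric illustration of the first helper's effect on one weight row:

import torch

x = torch.arange(8).reshape(1, 8)   # one output row, in_channel = 8
y = x.reshape(1, 4, 2)[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(1, 8)
assert y.tolist() == [[0, 4, 2, 6, 1, 5, 3, 7]]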
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__:Dict = TypeVar("""T""")
class snake_case__ ( Generic[T] ):
def __init__( self , lowerCamelCase = True ):
__a = {} # dictionary of lists
__a = directed
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase )
self.adj_list[destination_vertex].append(lowerCamelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase )
__a = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCamelCase )
__a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex. Also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
__a = [destination_vertex]
__a = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase )
__a = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__a = [destination_vertex]
__a = []
return self
def __repr__( self ):
return pformat(self.adj_list )
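# Added usage sketch (illustrative only). The identifiers in this snippet are
# obfuscated, so the names below assume the un-obfuscated original: a
# GraphAdjacencyList class whose edge method is add_edge(source, destination).
#
#     graph = GraphAdjacencyList(directed=False)
#     graph.add_edge(1, 2)
#     graph.add_edge(2, 3)
#     print(graph)  # {1: [2], 2: [1, 3], 3: [2]} -- undirected edges are mirrored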
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
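# Order of operations implemented by preprocess() above: optional RGB
# conversion -> resize so the shortest edge matches size["shortest_edge"] ->
# center crop to crop_size -> rescale by rescale_factor (1/255) -> normalize
# with the CLIP mean/std. A minimal sketch of the last two steps on a dummy
# array (illustrative only; the real pipeline uses the imported
# image_transforms helpers):
#
#     import numpy as np
#     pixel = np.full((224, 224, 3), 128.0, dtype=np.float32)
#     pixel = pixel * (1 / 255)
#     pixel = (pixel - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)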
| 67 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = """Hello world! cécé herlolip"""
def _lowerCamelCase( a , a , a ):
__a = FairseqRobertaModel.from_pretrained(a )
roberta.eval() # disable dropout
__a = roberta.model.encoder.sentence_encoder
__a = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__a = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" , a )
__a = XLMRobertaXLForSequenceClassification(a ) if classification_head else XLMRobertaXLForMaskedLM(a )
model.eval()
# Now let's copy all the weights.
# Embeddings
__a = roberta_sent_encoder.embed_tokens.weight
__a = roberta_sent_encoder.embed_positions.weight
__a = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out because RoBERTa doesn't use them.
__a = roberta_sent_encoder.layer_norm.weight
__a = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__a = model.roberta.encoder.layer[i]
__a = roberta_sent_encoder.layers[i]
__a = layer.attention
__a = roberta_layer.self_attn_layer_norm.weight
__a = roberta_layer.self_attn_layer_norm.bias
# self attention
__a = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__a = roberta_layer.self_attn.q_proj.weight
__a = roberta_layer.self_attn.q_proj.bias
__a = roberta_layer.self_attn.k_proj.weight
__a = roberta_layer.self_attn.k_proj.bias
__a = roberta_layer.self_attn.v_proj.weight
__a = roberta_layer.self_attn.v_proj.bias
# self-attention output
__a = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__a = roberta_layer.self_attn.out_proj.weight
__a = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__a = roberta_layer.final_layer_norm.weight
__a = roberta_layer.final_layer_norm.bias
# intermediate
__a = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__a = roberta_layer.fca.weight
__a = roberta_layer.fca.bias
# output
__a = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__a = roberta_layer.fca.weight
__a = roberta_layer.fca.bias
# end of layer
if classification_head:
__a = roberta.model.classification_heads["mnli"].dense.weight
__a = roberta.model.classification_heads["mnli"].dense.bias
__a = roberta.model.classification_heads["mnli"].out_proj.weight
__a = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__a = roberta.model.encoder.lm_head.dense.weight
__a = roberta.model.encoder.lm_head.dense.bias
__a = roberta.model.encoder.lm_head.layer_norm.weight
__a = roberta.model.encoder.lm_head.layer_norm.bias
__a = roberta.model.encoder.lm_head.weight
__a = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__a = roberta.encode(a ).unsqueeze(0 ) # batch of size 1
__a = model(a )[0]
if classification_head:
__a = roberta.model.classification_heads["mnli"](roberta.extract_features(a ) )
else:
__a = roberta.model(a )[0]
print(our_output.shape , their_output.shape )
__a = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__a = torch.allclose(a , a , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(a ).mkdir(parents=a , exist_ok=a )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__:int = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
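# A sample command line for the script above (the filename is hypothetical;
# only the function and argument names come from this snippet):
#
#     python convert_xlm_roberta_xl_checkpoint_to_pytorch.py \
#         --roberta_checkpoint_path /path/to/fairseq_checkpoint \
#         --pytorch_dump_folder_path ./xlm-roberta-xl \
#         --classification_head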
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
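# Worked example of the assertion above: for repo_id="org-name/dataset-name",
# path="filename with blanks.csv" and revision=None, hf_hub_url should return
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# (quote() percent-encodes the blanks, and a missing revision falls back to "main").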
| 67 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
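# Worked example: find_max([1, 3, 2], left=0, right=2) splits at mid=1,
# takes max(find_max over [1, 3], find_max over [2]) and returns 3.
# The recursion visits each index in [left, right] once, so it runs in O(n).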
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a ):
__a = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def _lowerCamelCase( a = 1_0_0 ):
__a = 1
__a = 2
for i in range(2 , max_n + 1 ):
__a = pre_numerator
__a = 2 * i // 3 if i % 3 == 0 else 1
__a = cur_numerator
__a = e_cont * pre_numerator + temp
return sum_digits(a )
if __name__ == "__main__":
print(F'''{solution() = }''')
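# Worked example (Project Euler 65): the loop builds the numerators of the
# convergents of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] via
# numerator_k = a_k * numerator_{k-1} + numerator_{k-2}. For max_n = 10 the
# numerator is 1457, so solution(10) == sum_digits(1457) == 17; the default
# solution(100) evaluates to 272.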
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class snake_case__ ( snake_case_ ):
_snake_case : int = """instructblip_vision_model"""
def __init__( self , lowerCamelCase=1408 , lowerCamelCase=6144 , lowerCamelCase=39 , lowerCamelCase=16 , lowerCamelCase=224 , lowerCamelCase=14 , lowerCamelCase="gelu" , lowerCamelCase=1E-6 , lowerCamelCase=0.0 , lowerCamelCase=1E-10 , lowerCamelCase=True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = patch_size
__a = image_size
__a = initializer_range
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
__a = qkv_bias
@classmethod
def a__ ( cls , lowerCamelCase , **lowerCamelCase ):
cls._set_token_in_kwargs(lowerCamelCase )
__a , __a = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__a = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class snake_case__ ( snake_case_ ):
_snake_case : List[str] = """instructblip_qformer"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=0 , lowerCamelCase="absolute" , lowerCamelCase=2 , lowerCamelCase=1408 , **lowerCamelCase , ):
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = cross_attention_frequency
__a = encoder_hidden_size
@classmethod
def a__ ( cls , lowerCamelCase , **lowerCamelCase ):
cls._set_token_in_kwargs(lowerCamelCase )
__a , __a = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__a = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class snake_case__ ( snake_case_ ):
_snake_case : List[Any] = """instructblip"""
_snake_case : Any = True
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=32 , **lowerCamelCase ):
super().__init__(**lowerCamelCase )
if vision_config is None:
__a = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
__a = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
__a = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
__a = InstructBlipVisionConfig(**lowerCamelCase )
__a = InstructBlipQFormerConfig(**lowerCamelCase )
__a = text_config["model_type"] if "model_type" in text_config else "opt"
__a = CONFIG_MAPPING[text_model_type](**lowerCamelCase )
__a = self.text_config.tie_word_embeddings
__a = self.text_config.is_encoder_decoder
__a = num_query_tokens
__a = self.vision_config.hidden_size
__a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__a = 1.0
__a = 0.02
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCamelCase , )
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.vision_config.to_dict()
__a = self.qformer_config.to_dict()
__a = self.text_config.to_dict()
__a = self.__class__.model_type
return output
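# A hedged usage sketch: the composite config above wires the three sub-configs
# together. The class names follow the un-obfuscated originals referenced in
# the log messages (InstructBlipConfig and friends); treat them as assumptions.
#
#     config = InstructBlipConfig(
#         vision_config={}, qformer_config={}, text_config={"model_type": "opt"}
#     )
#     config.to_dict()  # nests vision_config, qformer_config and text_config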
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def _lowerCamelCase( a ):
create_state_space_tree(a , [] , 0 )
def _lowerCamelCase( a , a , a ):
if index == len(a ):
print(a )
return
create_state_space_tree(a , a , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(a , a , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
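# Worked example: each element is either skipped (first recursive call) or
# appended (second), so a sequence of length n yields all 2**n subsequences.
# For ["A", "B", "C"] the tree prints 8 lists, from [] up to ['A', 'B', 'C'].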
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
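# Layout note for the helper above: it places 20 cameras on a circle, each at
# distance 4 from the origin (origin = -z * 4 with z normalized) and looking
# inward. camera_rays stacks (origin, direction) along dim=2, so
# rays[..., 0, :] are ray origins and rays[..., 1, :] are unit directions,
# giving points along a ray as origin + t * direction.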
| 67 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : Optional[int] = True
_snake_case : Tuple = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ):
self.model_tester = FlaxRoFormerModelTester(self )
@slow
def a__ ( self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
vocab_size = 50000
expected_shape = (1, 6, vocab_size)
self.assertEqual(output.shape , expected_shape )
expected_slice = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 67 | """simple docstring"""
def factorial( digit ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number ):
fact_sum = 0
duplicate = number
while duplicate > 0:
duplicate , digit = divmod(duplicate , 10 )
fact_sum += factorial(digit )
return fact_sum == number
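# Hedged quick check: 145 = 1! + 4! + 5! is the classic Krishnamurthy number.
# krishnamurthy(145) -> True ; krishnamurthy(10) -> False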
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 67 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum( nums = None ):
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
ans = nums[0]
for i in range(1 , len(nums ) ):
num = nums[i]
ans = max(ans , ans + num , num )
return ans
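# Hedged examples (subsequence here means elements need not be contiguous):
# max_subsequence_sum([1, 2, 3, 4, -2]) -> 10
# max_subsequence_sum([-2, -3, -1, -4, -6]) -> -1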
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
n = int(input("""Enter number of elements : """).strip())
array = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class snake_case__ ( snake_case_ ):
def _create_example_records( self ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ):
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(data )
def a__ ( self ):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(dset ):
self.assertDictEqual(r , example_records[i] )
def a__ ( self ):
example_records = self._create_example_records()
dset = Dataset.from_list(example_records )
dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ): # checks what happens with missing columns
records = [{"col_1": 1}, {"col_2": "x"}]
dset = Dataset.from_list(records )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def a__ ( self ): # checks if the type can be inferred from the second record
records = [{"col_1": []}, {"col_1": [1, 2]}]
dset = Dataset.from_list(records )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def a__ ( self ):
dset = Dataset.from_list([] )
self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
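# Hedged usage note: Dataset.from_list infers the schema from the first record,
# which is why the missing-column case above backfills None for "col_1".
# Dataset.from_list([{"a": 1}, {"a": 2}])[:]  # -> {"a": [1, 2]}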
| 67 | """simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
config = OmegaConf.load(config_path )
state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
keys = list(state_dict.keys() )
# extract state_dict for VQVAE
first_stage_dict = {}
first_stage_key = "first_stage_model."
for key in keys:
if key.startswith(first_stage_key ):
first_stage_dict[key.replace(first_stage_key , "" )] = state_dict[key]
# extract state_dict for UNetLDM
unet_state_dict = {}
unet_key = "model.diffusion_model."
for key in keys:
if key.startswith(unet_key ):
unet_state_dict[key.replace(unet_key , "" )] = state_dict[key]
vqvae_init_args = config.model.params.first_stage_config.params
unet_init_args = config.model.params.unet_config.params
vqvae = VQModel(**vqvae_init_args ).eval()
vqvae.load_state_dict(first_stage_dict )
unet = UNetLDMModel(**unet_init_args ).eval()
unet.load_state_dict(unet_state_dict )
noise_scheduler = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
pipeline = LDMPipeline(vqvae , unet , noise_scheduler )
pipeline.save_pretrained(output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
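# Hedged CLI sketch (script and file names are placeholders, not from this repo):
# python conversion_ldm_uncond.py --checkpoint_path ldm.ckpt --config_path ldm_config.yaml --output_path ./ldm_pipeline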
| 67 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
SCREAMING_SNAKE_CASE__:Any = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = """maskformer"""
_snake_case : int = {"""hidden_size""": """mask_feature_size"""}
_snake_case : str = ["""resnet""", """swin"""]
_snake_case : List[str] = ["""detr"""]
def __init__( self , fpn_feature_size = 256 , mask_feature_size = 256 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
backbone_config = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.pop("model_type" )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
decoder_config = DetrConfig()
else:
# verify that the decoder is supported
decoder_type = (
decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(decoder_config , dict ):
config_class = CONFIG_MAPPING[decoder_type]
decoder_config = config_class.from_dict(decoder_config )
self.backbone_config = backbone_config
self.decoder_config = decoder_config
# main feature dimension for the model
self.fpn_feature_size = fpn_feature_size
self.mask_feature_size = mask_feature_size
# initializer
self.init_std = init_std
self.init_xavier_std = init_xavier_std
# Hungarian matcher && loss
self.cross_entropy_weight = cross_entropy_weight
self.dice_weight = dice_weight
self.mask_weight = mask_weight
self.use_auxiliary_loss = use_auxiliary_loss
self.no_object_weight = no_object_weight
self.output_auxiliary_logits = output_auxiliary_logits
self.num_attention_heads = self.decoder_config.encoder_attention_heads
self.num_hidden_layers = self.decoder_config.num_hidden_layers
super().__init__(**kwargs )
@classmethod
def from_backbone_and_decoder_configs( cls , backbone_config , decoder_config , **kwargs ):
return cls(
backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
def to_dict( self ):
output = copy.deepcopy(self.__dict__ )
output["backbone_config"] = self.backbone_config.to_dict()
output["decoder_config"] = self.decoder_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
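# Hedged usage sketch (class name kept as in this file): a default instance
# falls back to the Swin backbone and DETR decoder wired in above.
# config = snake_case__()
# config.to_dict()["model_type"]  # -> "maskformer"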
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class snake_case__ ( snake_case_ ):
@property
def inputs( self ):
if self.task in ["default", "seq2seq-lm"]:
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
common_inputs["decoder_input_ids"] = {0: "batch"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
num_encoder_layers , _ = self.num_layers
for i in range(num_encoder_layers ):
common_inputs[F"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
common_inputs[F"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
else:
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def outputs( self ):
if self.task in ["default", "seq2seq-lm"]:
common_outputs = super().outputs
else:
common_outputs = super(OnnxSeq2SeqConfigWithPast , self ).outputs
if self.use_past:
num_encoder_layers , _ = self.num_layers
for i in range(num_encoder_layers ):
common_outputs[F"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
common_outputs[F"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , seq_length , is_pair , framework )
# Generate decoder inputs
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , decoder_seq_length , is_pair , framework )
decoder_inputs = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs , **decoder_inputs )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
batch , encoder_seq_length = common_inputs["input_ids"].shape
decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
decoder_past_length = decoder_seq_length + 3
decoder_shape = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
common_inputs["decoder_attention_mask"] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(batch , decoder_past_length )] , dim=1 )
common_inputs["past_key_values"] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
num_encoder_layers , num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers , num_decoder_layers )
max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(min_num_layers ):
common_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape ),
torch.zeros(decoder_shape ),
torch.zeros(encoder_shape ),
torch.zeros(encoder_shape ),
) )
# TODO: test this.
shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(min_num_layers , max_num_layers ):
common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
return common_inputs
def _generate_dummy_inputs_for_causal_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , seq_length , is_pair , framework )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
batch , seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
num_encoder_layers , _ = self.num_layers
num_encoder_attention_heads , _ = self.num_attention_heads
past_shape = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
mask_dtype = common_inputs["attention_mask"].dtype
common_inputs["attention_mask"] = torch.cat(
[common_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
common_inputs["past_key_values"] = [
(torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
]
return common_inputs
def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
seq_length = compute_effective_axis_dimension(
seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
# Generate dummy inputs according to compute batch and sequence
dummy_input = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
return common_inputs
def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
if self.task in ["default", "seq2seq-lm"]:
common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
elif self.task == "causal-lm":
common_inputs = self._generate_dummy_inputs_for_causal_lm(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
else:
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
return common_inputs
def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
if self.task in ["default", "seq2seq-lm"]:
flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
else:
flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
flattened_output , name , idx , t )
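# Hedged usage sketch: the dummy-input path above can be exercised directly;
# the tokenizer checkpoint below is an assumption for illustration.
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# onnx_config = snake_case__(model_config, task="default")
# dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)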
| 67 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = """transfo-xl"""
_snake_case : List[str] = ["""mems"""]
_snake_case : Optional[Any] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
self.vocab_size = vocab_size
self.cutoffs = []
self.cutoffs.extend(cutoffs )
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs )
else:
self.tie_projs = [False] + [False] * len(self.cutoffs )
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
self.layer_norm_epsilon = layer_norm_epsilon
super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
def max_position_embeddings( self ):
# Message copied from Transformer-XL documentation
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def max_position_embeddings( self , value ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.scope = None
self.decoder_layers = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
input_ids = input_ids.clamp(self.pad_token_id + 1 )
decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
config = self.get_config()
config.encoder_attention_heads = config.num_attention_heads
input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
return config, input_dict
def prepare_config_and_inputs_for_common( self ):
config , inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_pipeline_config( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def get_config( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
model = UMTaModel(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
# first forward pass
outputs = model(input_ids , use_cache=True )
outputs_use_cache_conf = model(input_ids )
outputs_no_past = model(input_ids , use_cache=False )
self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
output , past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
output_from_no_past = model(next_input_ids )["last_hidden_state"]
output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
def create_and_check_model_fpaa_forward( self , config , input_dict , ):
model = UMTaModel(config=config ).to(torch_device ).half().eval()
output = model(**input_dict )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
self.model_tester = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
model = UMTaModel(config_and_inputs[0] ).to(torch_device )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=True , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=False , legacy=False )
test_case = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
input_ids = tokenizer(test_case , return_tensors="pt" , padding=True ).input_ids
# fmt: off
EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
generated_ids = model.generate(input_ids.to(torch_device ) )
EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
filling = tokenizer.batch_decode(generated_ids )
self.assertEqual(filling , EXPECTED_FILLING )
| 67 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ):
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
# Initialise PyTorch model
config = MobileBertConfig.from_json_file(mobilebert_config_file )
print(F"Building PyTorch model from configuration: {config}" )
model = MobileBertForPreTraining(config )
# Load weights from tf checkpoint
model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
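# Hedged CLI sketch (script and file paths are placeholders):
# python convert_mobilebert_checkpoint.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_mobilebert.bin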
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
def __init__( self , parent , ):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.mem_len = 30
self.key_length = self.seq_length + self.mem_len
self.clamp_len = 15
self.is_training = True
self.use_labels = True
self.vocab_size = 99
self.cutoffs = [10, 50, 80]
self.hidden_size = 32
self.d_embed = 32
self.num_attention_heads = 4
self.d_head = 8
self.d_inner = 128
self.div_val = 2
self.num_hidden_layers = 2
self.scope = None
self.seed = 1
self.eos_token_id = 0
self.num_labels = 3
self.pad_token_id = self.vocab_size - 1
self.init_range = 0.01
def prepare_config_and_inputs( self ):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_1, input_ids_2, lm_labels)
def set_seed( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
model = TFTransfoXLModel(config )
hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
inputs = {"input_ids": input_ids_2, "mems": mems_1}
hidden_states_2 , mems_2 = model(inputs ).to_tuple()
self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
model = TFTransfoXLLMHeadModel(config )
lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
inputs = {"input_ids": input_ids_1, "labels": lm_labels}
_ , mems_1 = model(inputs ).to_tuple()
lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
_ , mems_2 = model(inputs ).to_tuple()
self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
model = TFTransfoXLForSequenceClassification(config )
result = model(input_ids_1 )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config , input_ids_1 , input_ids_2 , lm_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids_1}
return config, inputs_dict
@require_tf
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Optional[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_snake_case : Optional[Any] = () if is_tf_available() else ()
_snake_case : Dict = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_snake_case : str = False
_snake_case : Dict = False
_snake_case : str = False
_snake_case : Optional[int] = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def a__ ( self ):
self.model_tester = TFTransfoXLModelTester(self )
self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
def a__ ( self ):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
def a__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
def a__ ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
model = model_class(config )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
x = model.get_output_embeddings()
assert isinstance(x , tf.keras.layers.Layer )
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def a__ ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def a__ ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFTransfoXLModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def a__ ( self ):
pass
@require_tf
class snake_case__ ( unittest.TestCase ):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def a__ ( self ):
__a = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def _no_encoding_on_file_open( self , filepath ):
with open(filepath , encoding="utf-8" ) as input_file:
regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
input_text = input_file.read()
match = regexp.search(input_text )
return match
def _no_print_statements( self , filepath ):
with open(filepath , encoding="utf-8" ) as input_file:
regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
input_text = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
matches = regexp.finditer(input_text )
matches = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
dataset_paths = Path("./datasets" )
dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(dataset ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
dataset_paths = Path("./datasets" )
dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(dataset ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    # Current minimum recommendation is 2048 bit (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)
    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]
    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]
    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )
    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        # shaaaa is the module's SHA-256 hash helper imported above
        return shaaaa(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return shaaaa(str(shared_key).encode()).hexdigest()
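# A minimal end-to-end sketch of the exchange above (added illustration, not
# part of the original module): both parties derive the same shared key.
def _demo_diffie_hellman() -> None:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    # each side combines its own private key with the other's public key
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared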
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()
    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")
    def empty(self):
        return len(self.elements) == 0
    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))
    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))
    def top_show(self):
        return self.elements[0][1]
    def pop(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
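# Quick illustration of the queue above (added example, not in the original):
# `put` inserts a new item or re-prioritises an existing one, and `pop`
# returns the entry with the smallest priority.
#
#     pq = PriorityQueue()
#     pq.put((0, 0), 5)
#     pq.put((1, 1), 2)
#     pq.put((0, 0), 1)   # re-prioritise (0, 0) from 5 to 1
#     pq.pop()            # -> (1, (0, 0))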
def consistent_heuristic(p, goal):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_1(p, goal):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t
def heuristic_2(p, goal):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start, i, goal, g_function):
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
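# For example (added illustration): with Wa == 1, g_function[s] == 4 and a
# manhattan estimate heuristic_2(s, goal) == 7, key(s, 2, goal, g_function)
# evaluates to 4 + 1 * 7 == 11, the usual inflated-heuristic A* priority.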
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"
    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= Wa * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))
    for x in range(15, 20):
        some_list.append((x, 17))
    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))
    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1  # single inflation weight; the original W1 and W2 are both 1 here
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")
    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index):
        return index + (index & (-index))
    @staticmethod
    def prev(index):
        return index - (index & (-index))
    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index, value):
        self.add(index, value - self.get(index))
    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)
    def get(self, index):
        return self.query(index, index + 1)
    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
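# A small worked example for the tree above (added illustration, not part of
# the original module):
def _demo_fenwick_tree() -> None:
    f = FenwickTree([1, 2, 3, 4, 5])
    assert f.prefix(3) == 6                # 1 + 2 + 3
    f.add(1, 10)                           # underlying array becomes [1, 12, 3, 4, 5]
    assert f.query(1, 4) == 19             # 12 + 3 + 4
    assert f.get_array() == [1, 12, 3, 4, 5]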
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
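# Worked trace (added illustration) for "(2 + (3 * 4))": digits 2, 3, 4 go on
# the operand stack and "+", "*" on the operator stack; the first ")" pops "*"
# with 3 and 4 to push 12, the second pops "+" with 2 and 12 to push 14,
# which RULE 5 finally returns.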
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
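# Hedged usage sketch (added; assumes the standard transformers API rather
# than anything defined in this file):
#
#     from transformers import CamembertConfig, CamembertModel
#
#     config = CamembertConfig(num_hidden_layers=6)  # override any default
#     model = CamembertModel(config)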
| 67 | """simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
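# Sanity check (added example): a 90 degree arc of a circle with radius 10 is
# a quarter of the circumference 2 * pi * 10, about 62.83, so
# arc_length(90, 10) returns about 15.71.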
if __name__ == "__main__":
print(arc_length(90, 10))
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=2 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=36 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=6 , lowerCamelCase=6 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , lowerCamelCase=1000 , ):
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = patch_size
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = coordinate_size
__a = shape_size
__a = num_labels
__a = num_choices
__a = scope
__a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a = text_seq_length
__a = (image_size // patch_size) ** 2 + 1
__a = self.text_seq_length + self.image_seq_length
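        # e.g. (added illustration): with the tester defaults image_size=4,
        # patch_size=2 and text_seq_length=7, image_seq_length == (4 // 2) ** 2 + 1 == 5
        # and seq_length == 7 + 5 == 12.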
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__a = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a = bbox[i, j, 3]
__a = bbox[i, j, 1]
__a = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__a = bbox[i, j, 2]
__a = bbox[i, j, 0]
__a = tmp_coordinate
__a = tf.constant(lowerCamelCase )
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.text_seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = TFLayoutLMvaModel(config=lowerCamelCase )
# text + image
__a = model(lowerCamelCase , pixel_values=lowerCamelCase , training=lowerCamelCase )
__a = model(
lowerCamelCase , bbox=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , training=lowerCamelCase , )
__a = model(lowerCamelCase , bbox=lowerCamelCase , pixel_values=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__a = model(lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__a = model({"pixel_values": pixel_values} , training=lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_labels
__a = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase )
__a = model(
lowerCamelCase , bbox=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_labels
__a = TFLayoutLMvaForTokenClassification(config=lowerCamelCase )
__a = model(
lowerCamelCase , bbox=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = 2
__a = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase )
__a = model(
lowerCamelCase , bbox=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , training=lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) = config_and_inputs
__a = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : List[str] = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : Tuple = False
_snake_case : Union[str, Any] = False
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return True
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
__a = copy.deepcopy(lowerCamelCase )
if model_class in get_values(lowerCamelCase ):
__a = {
k: tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase ):
__a = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase ):
__a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase ):
__a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase ):
__a = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def a__ ( self ):
__a = TFLayoutLMvaModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
if getattr(lowerCamelCase , "hf_compute_loss" , lowerCamelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__a = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase , return_labels=lowerCamelCase )
__a = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase )[0]
]
__a = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__a = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase , return_labels=lowerCamelCase )
__a = prepared_for_class.pop("input_ids" )
__a = model(lowerCamelCase , **lowerCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__a = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase , return_labels=lowerCamelCase )
__a = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
__a = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__a = -100
__a = tf.convert_to_tensor(lowerCamelCase )
__a = model(lowerCamelCase , **lowerCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__a = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase , return_labels=lowerCamelCase )
__a = model(lowerCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__a = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase , return_labels=lowerCamelCase )
# Get keys that were added with the _prepare_for_class function
__a = prepared_for_class.keys() - inputs_dict.keys()
__a = inspect.signature(model.call ).parameters
__a = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__a = {0: "input_ids"}
for label_key in label_keys:
__a = signature_names.index(lowerCamelCase )
__a = label_key
__a = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__a = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__a = prepared_for_class[value]
__a = tuple(lowerCamelCase )
# Send to model
__a = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
@slow
def a__ ( self ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFLayoutLMvaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="tf" ).pixel_values
__a = tf.constant([[1, 2]] )
__a = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__a = model(input_ids=lowerCamelCase , bbox=lowerCamelCase , pixel_values=lowerCamelCase , training=lowerCamelCase )
# verify the logits
__a = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
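    # e.g. (added illustration): for size={"shortest_edge": 224} the target is
    # first scaled to int((256 / 224) * 224) == 256, the image's short side is
    # resized to 256 preserving aspect ratio, and the later center crop brings
    # it down to crop_size (224 x 224 by default).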
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number) )
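# For example (added illustration): 4**5 + 1**5 + 5**5 + 0**5
# == 1024 + 1 + 3125 + 0 == 4150, so 4150 is one of the numbers summed by
# solution().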
if __name__ == "__main__":
print(solution())
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
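        # e.g. (added illustration): with the tester defaults image_size=30 and
        # patch_size=2, num_patches == (30 // 2) ** 2 == 225, so the sequence
        # length is 226.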
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
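# Example (added illustration): the best split of [1, 6, 11, 5] is {1, 5, 6}
# (sum 12) against {11} (sum 11), so the minimum difference is 1.
def _demo_find_min() -> None:
    assert find_min([1, 6, 11, 5]) == 1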
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
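    # e.g. (added illustration): a 400 x 600 (h x w) input with
    # size={"shortest_edge": 18} resizes to height 18 and width
    # int(18 * 600 / 400) == 27, preserving the aspect ratio.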
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:str = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[Any] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _lowerCamelCase( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
class DeeBertTests( TestCasePlus ):
    def setUp( self ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args )
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args )
        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args )
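The test drives the example script by patching `sys.argv` before calling its `main()`. A minimal self-contained sketch of that pattern, with a toy `main` standing in for `run_glue_deebert.main()`:
import sys
from unittest.mock import patch

def main():
    # Toy entry point: echoes the one CLI value it receives.
    return {"acc": float(sys.argv[2])}

testargs = ["prog.py", "--acc", "0.9"]
with patch.object(sys, "argv", testargs):
    assert main()["acc"] == 0.9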
| 67 | 1 |
"""simple docstring"""
import os
def _lowerCamelCase( ):
    with open(os.path.dirname(__file__) + "/grid.txt" ) as f:
        l = []  # noqa: E741
        for _ in range(2_0 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(2_0 ):
        for j in range(1_7 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(1_7 ):
        for j in range(2_0 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(1_7 ):
        for j in range(1_7 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(1_7 ):
        for j in range(3 , 2_0 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    print(_lowerCamelCase())
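To see the four-cell windowing logic in isolation, here is the row-wise scan on a tiny hypothetical row (not the puzzle's grid.txt):
row = [1, 2, 3, 4, 5, 6]
best = max(row[j] * row[j + 1] * row[j + 2] * row[j + 3] for j in range(len(row) - 3))
assert best == 3 * 4 * 5 * 6  # the last four entries give the largest product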
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
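The class above mirrors the CLIP-style image processor shipped with transformers (resize to shortest edge, center crop, rescale, normalize). A hedged usage sketch via the released class, assuming transformers, numpy, and PIL are installed:
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)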
| 67 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
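A hedged sketch of the basic inference pattern these integration tests exercise (downloads the public checkpoint; the blank test image is illustrative):
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
inputs = processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 1000])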
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
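A hedged sketch of the conversation-encoding loop above, using the released GPT2TokenizerFast rather than the obfuscated class:
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
input_ids = []
for text in ["Hello!", "Hi, how can I help?"]:
    input_ids.extend(tok.encode(text, add_special_tokens=False) + [tok.eos_token_id])
input_ids = input_ids[-tok.model_max_length:]  # same truncation as above
print(input_ids)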
| 67 | 1 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
SCREAMING_SNAKE_CASE__:Dict = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
SCREAMING_SNAKE_CASE__:Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`'warn'`, `0`, or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
SCREAMING_SNAKE_CASE__:Any = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32" ) ),
                    "references": datasets.Sequence(datasets.Value("int32" ) ),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( repo_id , path , revision ):
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
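The expected URL shape, spelled out for one of the parametrized combinations (hypothetical inputs mirroring the decorators above):
from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
print(f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}")
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv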
| 67 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest( unittest.TestCase ):
    @slow
    def test_image_classification( self ):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo" )
        image = dataset["train"][0]["image"].convert("RGB" )
        inputs = image_processor(image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( nums , left , right ):
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = _lowerCamelCase(nums , left , mid )  # find max in range[left, mid]
    right_max = _lowerCamelCase(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
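For comparison, a standalone one-liner computing the same quantity the recursive version returns over an index range:
def find_max_reference(nums, left, right):
    # Direct max over the same inclusive slice, for cross-checking the recursion.
    return max(nums[left : right + 1])

assert find_max_reference([1, 5, 3, -2, 9, 4], 0, 5) == 9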
| 67 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict( state_dict ):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        vocab_size = 5_0_2_7_7
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location="cpu" )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , "w" , encoding="utf-8" ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model." )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size="2GB" )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
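For reference, a hypothetical invocation of the script above, assuming it is saved as convert_rwkv_checkpoint_to_hf.py (placeholder repo and file names, not verified against any real checkpoint):
# python convert_rwkv_checkpoint_to_hf.py \
#     --repo_id <hub_repo_with_rwkv_weights> \
#     --checkpoint_file <checkpoint.pth> \
#     --output_dir ./rwkv-converted \
#     --size 169M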
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
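A hedged sketch instantiating the released BigBirdConfig that the class above mirrors; the defaults printed here come from the signature in the code:
from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.max_position_embeddings)  # 4096 by default, matching the signature above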
| 67 | 1 |
"""simple docstring"""
import math
def res( x , y ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
SCREAMING_SNAKE_CASE__:Dict = "Enter the base and the power separated by a comma: "
SCREAMING_SNAKE_CASE__:Dict = map(int, input(prompt).split(""","""))
SCREAMING_SNAKE_CASE__:Union[str, Any] = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
SCREAMING_SNAKE_CASE__:int = res(xa, ya)
SCREAMING_SNAKE_CASE__:Optional[Any] = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 700 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
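A hedged usage sketch via the released BloomTokenizerFast, which the class above corresponds to (downloads the public tokenizer):
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
print(tok("Hello world")["input_ids"])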
| 67 | 0 |