"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCamelCase__ = "sshleifer/bart-tiny-random"
lowerCamelCase__ = "patrickvonplaten/t5-tiny-random"
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return AutoConfig.from_pretrained(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase, *_UpperCamelCase : Optional[Any] = create_student_by_copying_alternating_layers(__a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase, *_UpperCamelCase : Optional[int] = create_student_by_copying_alternating_layers(__a , tempfile.mkdtemp() , e=1 , d=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
_UpperCamelCase, *_UpperCamelCase : Any = create_student_by_copying_alternating_layers(__a , tempfile.mkdtemp() , e=1 , d=__a )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase, *_UpperCamelCase : Union[str, Any] = create_student_by_copying_alternating_layers(__a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
with self.assertRaises(__a ):
create_student_by_copying_alternating_layers(__a , tempfile.mkdtemp() , e=__a , d=__a )
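
# Usage sketch for the helper under test (the output directory is hypothetical;
# `e`/`d` select how many encoder/decoder layers to keep, with `d=None` meaning
# "copy the teacher's full decoder"). The student model comes first in the return
# tuple, which is why the tests above discard the rest with `*_`:
#
#     student, *_ = create_student_by_copying_alternating_layers(
#         "sshleifer/bart-tiny-random", "student_dir", e=1, d=1
#     )
#     student.save_pretrained("student_dir")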
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
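
# Construction sketch: since RagConfig is a composition of two sub-configs, the usual
# entry point is the classmethod above rather than `__init__`. The model IDs below are
# illustrative only:
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)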
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
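
# Runtime behaviour of the `_LazyModule` indirection above (a sketch, not part of the
# module; the exact import path is an assumption and depends on whether mctct sits
# under models/ or models/deprecated/ in a given checkout):
#
#     import transformers.models.deprecated.mctct as mctct  # cheap: nothing heavy imported yet
#     config = mctct.MCTCTConfig()  # first attribute access triggers the real submodule import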
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the last hidden state
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
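
# Worked example of the sequence-length arithmetic the tester relies on (values here
# are for the google/vit-base-patch16-224 checkpoint; the tester itself uses 30/2):
#
#     image_size, patch_size = 224, 16
#     num_patches = (image_size // patch_size) ** 2   # 14 * 14 = 196
#     seq_length = num_patches + 1                    # +1 for the [CLS] token -> 197
#
# The interpolation test's expected shape follows the same rule for dino-vits8
# at 480px with 8px patches: (480 // 8) ** 2 + 1 = 3601.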
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = ["pixel_values"]
def __init__( self : Dict , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : List[str] = size if size is not None else {"shortest_edge": 224}
_UpperCamelCase : str = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a , param_name="crop_size" )
_UpperCamelCase : List[str] = do_resize
_UpperCamelCase : List[Any] = size
_UpperCamelCase : Any = resample
_UpperCamelCase : Union[str, Any] = do_center_crop
_UpperCamelCase : List[str] = crop_size
_UpperCamelCase : Tuple = do_rescale
_UpperCamelCase : Tuple = rescale_factor
_UpperCamelCase : List[str] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase : List[str] = do_convert_rgb
def __SCREAMING_SNAKE_CASE ( self : str , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ) -> np.ndarray:
_UpperCamelCase : Optional[int] = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_UpperCamelCase : str = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray:
_UpperCamelCase : str = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> Optional[int]:
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : str , ) -> PIL.Image.Image:
_UpperCamelCase : str = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Union[str, Any] = size if size is not None else self.size
_UpperCamelCase : Union[str, Any] = get_size_dict(__a , param_name="size" , default_to_square=__a )
_UpperCamelCase : Any = resample if resample is not None else self.resample
_UpperCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : int = get_size_dict(__a , param_name="crop_size" , default_to_square=__a )
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Any = image_std if image_std is not None else self.image_std
_UpperCamelCase : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase : str = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase : Tuple = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[int] = [to_numpy_array(__a ) for image in images]
if do_resize:
_UpperCamelCase : Dict = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
_UpperCamelCase : Dict = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
_UpperCamelCase : Tuple = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
_UpperCamelCase : Optional[int] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
_UpperCamelCase : Optional[Any] = [to_channel_dimension_format(__a , __a ) for image in images]
_UpperCamelCase : List[str] = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
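
# Minimal usage sketch (the input file name is illustrative). With the defaults above,
# the pipeline is RGB-convert -> shortest-edge resize -> 224x224 center-crop ->
# rescale by 1/255 -> normalize with the OpenAI CLIP mean/std:
#
#     from PIL import Image
#     from transformers import CLIPImageProcessor
#
#     processor = CLIPImageProcessor()
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])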
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
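
# The pattern the iterator tests exercise, in application form (a sketch; the model ID
# is illustrative). `generate` blocks until done, so it runs in a background thread
# while the main thread consumes decoded text chunks as they arrive:
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#     tok = AutoTokenizer.from_pretrained("distilgpt2")
#     model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#     inputs = tok("Hello", return_tensors="pt")
#     streamer = TextIteratorStreamer(tok, skip_prompt=True)
#     Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
#     for piece in streamer:
#         print(piece, end="", flush=True)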
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCamelCase__ = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCamelCase__ = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
lowerCamelCase__ = "zero2"
lowerCamelCase__ = "zero3"
lowerCamelCase__ = [ZEROa, ZEROa]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : List[str] = parameterized.to_safe_name("_".join(str(lowercase_ ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowerCamelCase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@parameterized.expand(__a , name_func=__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, Any] , __a : Optional[Any] ) -> str:
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
@require_torch_multi_gpu
@parameterized.expand(__a , name_func=__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Tuple , __a : Optional[Any] ) -> Optional[Any]:
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
@parameterized.expand(__a , name_func=__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any , __a : List[Any] ) -> Optional[Any]:
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
@require_torch_multi_gpu
@parameterized.expand(__a , name_func=__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[Any] , __a : int ) -> Tuple:
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : int ) -> Dict:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : str , __a : int = 10 , __a : bool = True , __a : bool = True , __a : bool = True , ) -> int:
_UpperCamelCase : Tuple = models[model]
_UpperCamelCase : Optional[Any] = self.run_trainer(
stage=__a , model_name=__a , eval_steps=__a , num_train_epochs=1 , distributed=__a , fpaa=__a , )
self.do_checks(__a )
return output_dir
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str , __a : str , __a : int = 10 , __a : int = 1 , __a : bool = True , __a : bool = True , ) -> str:
_UpperCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir("./xxx" , after=__a )
_UpperCamelCase : Optional[Any] = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__a )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_UpperCamelCase : Dict = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_UpperCamelCase : Any = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_UpperCamelCase : Dict = self.get_launcher(__a )
_UpperCamelCase : int = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__a , env=self.get_env() )
return output_dir
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Dict=False ) -> Dict:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_UpperCamelCase : int = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
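
# Example invocation (all paths are placeholders for files from the original mLUKE
# release; the script file name is assumed):
#
#     python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./mluke/pytorch_model.bin \
#         --metadata_path ./mluke/metadata.json \
#         --entity_vocab_path ./mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base-converted \
#         --model_size base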
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
_UpperCamelCase : Optional[Any] = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" ,lowercase_ )
if matches:
_UpperCamelCase : Tuple = float(matches[1] )
_UpperCamelCase : Dict = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
_UpperCamelCase : Optional[Any] = 1_001
_UpperCamelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_UpperCamelCase : Union[str, Any] = "huggingface/label-files"
_UpperCamelCase : Union[str, Any] = json.load(open(hf_hub_download(lowercase_ ,lowercase_ ,repo_type="dataset" ) ,"r" ) )
_UpperCamelCase : str = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = "background"
_UpperCamelCase : Optional[int] = idalabel
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
return config
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase : str = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=False ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Dict = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
_UpperCamelCase : str = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ ,lowercase_ ,lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_UpperCamelCase : List[Any] = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} ,size={"shortest_edge": config.image_size + 32} ,)
_UpperCamelCase : Optional[int] = image_processor(images=prepare_img() ,return_tensors="pt" )
_UpperCamelCase : Optional[int] = model(**lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.logits
assert logits.shape == (1, 1_001)
if model_name == "mobilenet_v1_1.0_224":
_UpperCamelCase : str = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
_UpperCamelCase : Optional[int] = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
_UpperCamelCase : int = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] ,lowercase_ ,atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print("Pushing to the hub..." )
_UpperCamelCase : Dict = "google/" + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
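
# Example invocation (the checkpoint path is a placeholder for the TF release files;
# the script file name is assumed):
#
#     python convert_original_tf_checkpoint_to_pytorch.py \
#         --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224-pt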
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
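# --- Illustrative addition, not part of the original metric module: a minimal
# per-sentence GLEU implementing the recall/precision/min definition quoted in the
# description above. The helper names below are assumptions made for this sketch.
from collections import Counter


def _ngram_counts(tokens, min_len=1, max_len=4):
    """Count every n-gram of order min_len..max_len in a token list."""
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts


def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    """GLEU = min(n-gram recall, n-gram precision) for one sentence pair."""
    ref_counts = _ngram_counts(reference, min_len, max_len)
    hyp_counts = _ngram_counts(hypothesis, min_len, max_len)
    matches = sum((ref_counts & hyp_counts).values())  # clipped n-gram matches
    recall = matches / max(sum(ref_counts.values()), 1)
    precision = matches / max(sum(hyp_counts.values()), 1)
    return min(recall, precision)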
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ = 4_000_000 ) -> int:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase, _UpperCamelCase : List[str] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowercase_ )
_UpperCamelCase, _UpperCamelCase : int = b, a + b
return sum(lowercase_ )
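# Illustrative alternative (an addition, not part of the original solution): the
# even Fibonacci numbers satisfy their own recurrence E(k) = 4*E(k-1) + E(k-2),
# starting from 2 and 8, so the even-valued sum can be computed without filtering.
def solution_even_only(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total


assert solution_even_only() == 4_613_732  # known sum of even Fibonacci terms below 4,000,000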
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 310
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
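# Illustrative behaviour of the two small helpers above (an added sketch, not part
# of the original module): the URI helper strips the scheme prefix, and a filesystem
# counts as remote whenever its protocol is anything other than "file".
_example_uri = "s3://bucket/data"
_example_path = _example_uri.split("://")[1] if "://" in _example_uri else _example_uri
assert _example_path == "bucket/data"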
| 310
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCamelCase__ = 25_6047
lowerCamelCase__ = 25_6145
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = NllbTokenizer
SCREAMING_SNAKE_CASE__ :List[Any] = NllbTokenizerFast
SCREAMING_SNAKE_CASE__ :List[Any] = True
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :List[Any] = {}
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : List[str] = NllbTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : int = NllbTokenizer(__a , keep_accents=__a )
_UpperCamelCase : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCamelCase : Tuple = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCamelCase : str = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Any = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tempfile.mkdtemp()
_UpperCamelCase : Dict = tokenizer_r.save_pretrained(__a )
_UpperCamelCase : Optional[int] = tokenizer_p.save_pretrained(__a )
                # Checks that it saves the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_UpperCamelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
_UpperCamelCase : Tuple = tokenizer_r.from_pretrained(__a )
_UpperCamelCase : List[Any] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase : Any = tempfile.mkdtemp()
_UpperCamelCase : Tuple = tokenizer_r.save_pretrained(__a , legacy_format=__a )
_UpperCamelCase : Any = tokenizer_p.save_pretrained(__a )
                # Checks that it saves the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
_UpperCamelCase : Union[str, Any] = tokenizer_r.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase : List[str] = tempfile.mkdtemp()
_UpperCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase : Dict = tokenizer_r.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
if not self.test_seqaseq:
return
_UpperCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
_UpperCamelCase : Any = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
_UpperCamelCase : int = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
_UpperCamelCase : List[str] = tokenizer.prepare_seqaseq_batch(
src_texts=__a , tgt_texts=__a , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
_UpperCamelCase : Tuple = tokenizer.prepare_seqaseq_batch(
__a , tgt_texts=__a , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
_UpperCamelCase : List[Any] = tokenizer.prepare_seqaseq_batch(
src_texts=__a , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , __a )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : Dict = [AddedToken("<special>" , lstrip=__a )]
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode("Hey this is a <special> token" )
_UpperCamelCase : List[str] = tokenizer_r.encode("<special>" , add_special_tokens=__a )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a , )
_UpperCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a )
_UpperCamelCase : Dict = tokenizer_p.encode("Hey this is a <special> token" )
_UpperCamelCase : Union[str, Any] = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "facebook/nllb-200-distilled-600M"
SCREAMING_SNAKE_CASE__ :Any = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE__ :Optional[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE__ :str = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : str ) -> List[Any]:
_UpperCamelCase : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
_UpperCamelCase : Optional[int] = 1
return cls
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_6057 )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
_UpperCamelCase : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
self.assertIn(__a , self.tokenizer.all_special_ids )
# fmt: off
_UpperCamelCase : Optional[Any] = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
_UpperCamelCase : Optional[int] = self.tokenizer.decode(__a , skip_special_tokens=__a )
_UpperCamelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __a )
_UpperCamelCase : List[Any] = 10
_UpperCamelCase : Union[str, Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __a )
self.assertEqual(len(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_6203, 3] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : List[Any] = tempfile.mkdtemp()
_UpperCamelCase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
_UpperCamelCase : Optional[Any] = NllbTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
_UpperCamelCase : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
_UpperCamelCase : str = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
_UpperCamelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(__a , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
_UpperCamelCase : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="pt" )
_UpperCamelCase : Optional[Any] = targets["input_ids"]
_UpperCamelCase : str = shift_tokens_right(
__a , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(__a ) , {
                # eng_Latn, A, test, EOS
"input_ids": [[25_6047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
"forced_bos_token_id": 25_6057,
} , )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Any = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Dict = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
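# For context, a minimal sketch of the decoder-input shifting exercised in the tests
# above. This is the generic BART-style shift-right; the M2M-100/NLLB variant instead
# wraps the trailing language-id token to position 0. Illustrative re-implementation,
# not the transformers source:
import torch


def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id  # decoder starts from the target language code
    shifted.masked_fill_(shifted == -100, pad_token_id)  # replace ignore-index with pad
    return shifted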
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 1
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int
SCREAMING_SNAKE_CASE__ :jnp.dtype = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
_UpperCamelCase : Union[str, Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , __a : Optional[Any] ) -> Any:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = hidden_states.shape
_UpperCamelCase : Union[str, Any] = jax.image.resize(
__a , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
_UpperCamelCase : str = self.conv(__a )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int
SCREAMING_SNAKE_CASE__ :jnp.dtype = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
_UpperCamelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Union[str, Any] , __a : Union[str, Any] ) -> int:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_UpperCamelCase : Dict = self.conv(__a )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int
SCREAMING_SNAKE_CASE__ :int = None
SCREAMING_SNAKE_CASE__ :float = 0.0
SCREAMING_SNAKE_CASE__ :bool = None
SCREAMING_SNAKE_CASE__ :jnp.dtype = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase : Optional[int] = self.in_channels if self.out_channels is None else self.out_channels
_UpperCamelCase : List[Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase : Dict = nn.Conv(
__a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCamelCase : Dict = nn.Dense(__a , dtype=self.dtype )
_UpperCamelCase : str = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase : str = nn.Dropout(self.dropout_prob )
_UpperCamelCase : str = nn.Conv(
__a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCamelCase : str = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_UpperCamelCase : Optional[Any] = None
if use_nin_shortcut:
_UpperCamelCase : str = nn.Conv(
__a , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self : int , __a : Optional[int] , __a : List[str] , __a : int=True ) -> Dict:
_UpperCamelCase : Tuple = hidden_states
_UpperCamelCase : int = self.norma(__a )
_UpperCamelCase : List[str] = nn.swish(__a )
_UpperCamelCase : Optional[int] = self.conva(__a )
_UpperCamelCase : List[Any] = self.time_emb_proj(nn.swish(__a ) )
_UpperCamelCase : Tuple = jnp.expand_dims(jnp.expand_dims(__a , 1 ) , 1 )
_UpperCamelCase : str = hidden_states + temb
_UpperCamelCase : int = self.norma(__a )
_UpperCamelCase : Dict = nn.swish(__a )
_UpperCamelCase : int = self.dropout(__a , __a )
_UpperCamelCase : List[Any] = self.conva(__a )
if self.conv_shortcut is not None:
_UpperCamelCase : List[str] = self.conv_shortcut(__a )
return hidden_states + residual
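# A self-contained sketch (an addition) of the nearest-neighbour 2x upsampling used
# by the first block above: jax.image.resize doubles H and W before the 3x3 conv.
# Shapes are NHWC, matching the Flax convention used in this file.
_x = jnp.arange(4.0).reshape(1, 2, 2, 1)
_y = jax.image.resize(_x, shape=(1, 4, 4, 1), method="nearest")
assert _y.shape == (1, 4, 4, 1)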
| 310
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 1
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __SCREAMING_SNAKE_CASE ( ctypes.Structure ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
if os.name == "nt":
_UpperCamelCase : List[Any] = CursorInfo()
_UpperCamelCase : Dict = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(lowercase_ ,ctypes.byref(lowercase_ ) )
_UpperCamelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(lowercase_ ,ctypes.byref(lowercase_ ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
if os.name == "nt":
_UpperCamelCase : Optional[Any] = CursorInfo()
_UpperCamelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(lowercase_ ,ctypes.byref(lowercase_ ) )
_UpperCamelCase : Dict = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(lowercase_ ,ctypes.byref(lowercase_ ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
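# The escape sequences above are the standard DECTCEM ("text cursor enable") codes:
# ESC[?25l hides the terminal cursor and ESC[?25h shows it again on ANSI terminals.
# Minimal standalone demonstration of the helpers (illustrative addition):
if __name__ == "__main__":
    hide_cursor()
    print("cursor hidden while this prints")
    show_cursor()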
| 310
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
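# A sketch of how the inputs mapping above is typically consumed when exporting to
# ONNX (illustrative; the model/tensor names here are assumptions, not this module's API):
#
#     torch.onnx.export(
#         model, (input_ids, attention_mask), "xlm-roberta-xl.onnx",
#         input_names=["input_ids", "attention_mask"],
#         dynamic_axes={"input_ids": {0: "batch", 1: "sequence"},
#                       "attention_mask": {0: "batch", 1: "sequence"}},
#     )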
| 310
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=__a , )
assert hasattr(self , "env" )
def __SCREAMING_SNAKE_CASE ( self : str , __a : str ) -> Any:
# configuration for running training on smdistributed Model Parallel
_UpperCamelCase : Optional[int] = {
"enabled": True,
"processes_per_host": 8,
}
_UpperCamelCase : Any = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
_UpperCamelCase : str = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
_UpperCamelCase : str = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version="py36" , )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[Any] ) -> Dict:
TrainingJobAnalytics(__a ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, Any] ) -> Tuple:
# create estimator
_UpperCamelCase : Tuple = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
_UpperCamelCase : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_UpperCamelCase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
_UpperCamelCase : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __a )
| 310
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
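# Minimal usage sketch of the pipeline exercised above (mirrors the slow tests;
# illustrative, requires network access and the torch/timm extras):
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # each item: {"score": float, "label": str,
#     #             "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}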
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
return "".join(chr(ord(lowercase_ ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 310
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=__a ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 1
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __SCREAMING_SNAKE_CASE ( enum.Enum ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = 0
SCREAMING_SNAKE_CASE__ :Optional[int] = 1
SCREAMING_SNAKE_CASE__ :Dict = 2
@add_end_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Dict , *__a : Any , **__a : Optional[int] ) -> int:
super().__init__(*__a , **__a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase : Optional[Any] = None
if self.model.config.prefix is not None:
_UpperCamelCase : List[str] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = self._sanitize_parameters(prefix=__a , **self._forward_params )
_UpperCamelCase : Any = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase : Optional[Any] = {**self._forward_params, **forward_params}
def __SCREAMING_SNAKE_CASE ( self : str , __a : str=None , __a : List[Any]=None , __a : Any=None , __a : Optional[int]=None , __a : str=None , __a : Any=None , __a : Tuple=None , __a : Dict=None , **__a : Tuple , ) -> List[Any]:
_UpperCamelCase : List[str] = {}
if prefix is not None:
_UpperCamelCase : Dict = prefix
if prefix:
_UpperCamelCase : List[Any] = self.tokenizer(
__a , padding=__a , add_special_tokens=__a , return_tensors=self.framework )
_UpperCamelCase : Optional[int] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
_UpperCamelCase : Dict = handle_long_generation
preprocess_params.update(__a )
_UpperCamelCase : List[Any] = generate_kwargs
_UpperCamelCase : str = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
_UpperCamelCase : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
_UpperCamelCase : int = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase : Union[str, Any] = self.tokenizer.encode(__a , add_special_tokens=__a )
if len(__a ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
_UpperCamelCase : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __SCREAMING_SNAKE_CASE ( self : Dict , *__a : Any , **__a : Union[str, Any] ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*__a , **__a )
def __call__( self : Optional[Any] , __a : Tuple , **__a : str ) -> Optional[int]:
return super().__call__(__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Any , __a : List[Any]="" , __a : str=None , **__a : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=__a , add_special_tokens=__a , return_tensors=self.framework )
_UpperCamelCase : List[str] = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase : List[str] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase : Dict = generate_kwargs["max_new_tokens"]
else:
_UpperCamelCase : Optional[Any] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
_UpperCamelCase : str = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase : Optional[Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , **__a : List[str] ) -> Any:
_UpperCamelCase : Optional[Any] = model_inputs["input_ids"]
_UpperCamelCase : Any = model_inputs.get("attention_mask" , __a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase : Dict = None
_UpperCamelCase : Dict = None
_UpperCamelCase : Optional[Any] = 1
else:
_UpperCamelCase : List[Any] = input_ids.shape[0]
_UpperCamelCase : Optional[Any] = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase : Optional[Any] = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
_UpperCamelCase : Dict = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase : int = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase : Tuple = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase : Tuple = self.model.generate(input_ids=__a , attention_mask=__a , **__a )
_UpperCamelCase : str = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase : List[str] = generated_sequence.reshape(__a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase : Optional[int] = tf.reshape(__a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Tuple , __a : Optional[int]=ReturnType.FULL_TEXT , __a : Union[str, Any]=True ) -> Dict:
_UpperCamelCase : Dict = model_outputs["generated_sequence"][0]
_UpperCamelCase : List[str] = model_outputs["input_ids"]
_UpperCamelCase : Optional[int] = model_outputs["prompt_text"]
_UpperCamelCase : Any = generated_sequence.numpy().tolist()
_UpperCamelCase : str = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase : Optional[Any] = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase : Optional[Any] = self.tokenizer.decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
                # Remove the prompt (including the PADDING prefix added for XLNet or Transfo-XL) from the decoded text
if input_ids is None:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Union[str, Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase : int = prompt_text + text[prompt_length:]
else:
_UpperCamelCase : int = text[prompt_length:]
_UpperCamelCase : Optional[int] = {"generated_text": all_text}
records.append(__a )
return records
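# A toy illustration (an addition) of the "hole" strategy implemented in preprocess
# above: keep only the trailing context so that prompt + max_new_tokens fits the window.
_model_max_length, _max_new_tokens = 1024, 256
_prompt_ids = list(range(2000))  # over-long toy prompt
_prompt_ids = _prompt_ids[-(_model_max_length - _max_new_tokens):]
assert len(_prompt_ids) + _max_new_tokens == _model_max_length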
| 310
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : Any = _sin / (2 * q_factor)
_UpperCamelCase : str = (1 - _cos) / 2
_UpperCamelCase : Any = 1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : List[str] = -2 * _cos
_UpperCamelCase : Tuple = 1 - alpha
_UpperCamelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : List[str] = tau * frequency / samplerate
_UpperCamelCase : str = sin(lowercase_ )
_UpperCamelCase : Optional[Any] = cos(lowercase_ )
_UpperCamelCase : Dict = _sin / (2 * q_factor)
_UpperCamelCase : List[Any] = (1 + _cos) / 2
_UpperCamelCase : Optional[int] = -1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : str = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Tuple = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Dict = _sin / 2
_UpperCamelCase : int = 0
_UpperCamelCase : str = -ba
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : Optional[int] = -2 * _cos
_UpperCamelCase : Optional[Any] = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : str = tau * frequency / samplerate
_UpperCamelCase : Optional[Any] = sin(lowercase_ )
_UpperCamelCase : Optional[int] = cos(lowercase_ )
_UpperCamelCase : int = _sin / (2 * q_factor)
_UpperCamelCase : List[str] = 1 - alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : Union[str, Any] = 1 + alpha
_UpperCamelCase : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : int = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : List[Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Optional[int] = 10 ** (gain_db / 40)
_UpperCamelCase : str = 1 + alpha * big_a
_UpperCamelCase : Union[str, Any] = -2 * _cos
_UpperCamelCase : Optional[int] = 1 - alpha * big_a
_UpperCamelCase : int = 1 + alpha / big_a
_UpperCamelCase : Optional[Any] = -2 * _cos
_UpperCamelCase : Any = 1 - alpha / big_a
_UpperCamelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tau * frequency / samplerate
_UpperCamelCase : Any = sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40)
_UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : Any = big_a * (pmc + aaa)
_UpperCamelCase : Dict = 2 * big_a * mpc
_UpperCamelCase : str = big_a * (pmc - aaa)
_UpperCamelCase : Dict = ppmc + aaa
_UpperCamelCase : List[Any] = -2 * pmpc
_UpperCamelCase : Dict = ppmc - aaa
_UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : Any = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : str = 10 ** (gain_db / 40)
_UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : List[Any] = big_a * (ppmc + aaa)
_UpperCamelCase : Dict = -2 * big_a * pmpc
_UpperCamelCase : Dict = big_a * (ppmc - aaa)
_UpperCamelCase : Optional[Any] = pmc + aaa
_UpperCamelCase : Any = 2 * mpc
_UpperCamelCase : Any = pmc - aaa
_UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
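# The identifier rewriting above collapsed every b-coefficient into "ba" and
# every a-coefficient into "aa", so the filter bodies no longer read cleanly.
# A readable reconstruction of the first function (a 2nd-order low-pass, per
# the Audio EQ Cookbook) might look like this; it assumes the same IIRFilter
# API, with set_coefficients(a_coeffs, b_coeffs), imported at the top above.
def make_lowpass_readable(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin, _cos = sin(w0), cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2  # feed-forward coefficients; b2 equals b0 for the low-pass
    b1 = 1 - _cos
    a0 = 1 + alpha  # feedback coefficients
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt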
| 310
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = "▁"
lowerCamelCase__ = {"vocab_file": "sentencepiece.bpe.model"}
lowerCamelCase__ = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
lowerCamelCase__ = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
lowerCamelCase__ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ :List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ :int = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE__ :List[int] = []
SCREAMING_SNAKE_CASE__ :List[int] = []
def __init__( self : Optional[int] , __a : int , __a : int=None , __a : List[Any]=None , __a : Tuple="</s>" , __a : Optional[Any]="</s>" , __a : str="<s>" , __a : List[str]="<unk>" , __a : List[str]="<pad>" , __a : List[Any]="<mask>" , __a : Optional[Dict[str, Any]] = None , **__a : Union[str, Any] , ) -> None:
# The mask token behaves like a normal word, i.e. it includes the space before it
_UpperCamelCase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
_UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCamelCase : Optional[Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__a , tgt_lang=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
_UpperCamelCase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : str = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : Dict = len(self.sp_model )
_UpperCamelCase : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
_UpperCamelCase : Dict = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCamelCase : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCamelCase : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCamelCase : Tuple = src_lang if src_lang is not None else "en_XX"
_UpperCamelCase : Tuple = self.lang_code_to_id[self._src_lang]
_UpperCamelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : str ) -> None:
_UpperCamelCase : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Any ) -> Dict:
_UpperCamelCase : Union[str, Any] = self.__dict__.copy()
_UpperCamelCase : List[str] = None
return state
def __setstate__( self : Dict , __a : Dict ) -> None:
_UpperCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : Optional[Any] = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : str ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : str ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : int = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Any , __a : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[str] = ""
_UpperCamelCase : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
_UpperCamelCase : List[str] = True
_UpperCamelCase : Any = []
else:
current_sub_tokens.append(__a )
_UpperCamelCase : Optional[Any] = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : int , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase : str = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
_UpperCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_UpperCamelCase : Optional[int] = [1] * len(self.prefix_tokens )
_UpperCamelCase : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str] , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Any ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_UpperCamelCase : int = src_lang
_UpperCamelCase : Union[str, Any] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
_UpperCamelCase : int = self.convert_tokens_to_ids(__a )
_UpperCamelCase : Optional[Any] = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str] , __a : str = "en_XX" , __a : Optional[List[str]] = None , __a : str = "ro_RO" , **__a : Any , ) -> BatchEncoding:
_UpperCamelCase : List[Any] = src_lang
_UpperCamelCase : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : str ) -> None:
_UpperCamelCase : List[str] = self.lang_code_to_id[src_lang]
_UpperCamelCase : Optional[Any] = [self.cur_lang_code_id]
_UpperCamelCase : List[Any] = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str ) -> None:
_UpperCamelCase : str = self.lang_code_to_id[tgt_lang]
_UpperCamelCase : Dict = [self.cur_lang_code_id]
_UpperCamelCase : Optional[int] = [self.eos_token_id]
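# A hedged usage sketch for the class above (its name was rewritten; in
# transformers this is MBart50Tokenizer). Assumes sentencepiece, torch, and
# network access to the checkpoint listed in PRETRAINED_VOCAB_FILES_MAP.
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above, the ids start with the en_XX
# language-code token and end with eos_token_id.
assert batch["input_ids"][0, 0].item() == tokenizer.lang_code_to_id["en_XX"]
assert batch["input_ids"][0, -1].item() == tokenizer.eos_token_id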
| 310
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important: change the bos & pad token ids, since the CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
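# A self-contained sketch of the "*"-wildcard renaming used by
# recursively_load_weights above: the layer index is pulled out of the fairseq
# parameter name and substituted into the HF key template. The mapping entry
# mirrors MAPPING above; the example parameter name is illustrative.
def map_fairseq_key(name, key, mapped_key):
    if key not in name:
        return None
    if "*" in mapped_key:
        layer_index = name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key

assert map_fairseq_key(
    "encoder.layers.3.self_attn.k_proj.weight",
    "self_attn.k_proj",
    "encoder.layers.*.attention.k_proj",
) == "encoder.layers.3.attention.k_proj"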
| 310
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase__ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
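# A self-contained sketch of the same Möbius function without the maths.*
# helpers: factor n by trial division; any squared prime factor gives 0,
# otherwise the sign is (-1) raised to the number of distinct prime factors.
def mobius_standalone(n: int) -> int:
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor -> not square-free
                return 0
            result = -result
        p += 1
    if n > 1:  # one prime factor left over
        result = -result
    return result

assert [mobius_standalone(k) for k in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]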
| 310
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
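# The lazy-import structure above defers the heavy torch/tokenizers imports
# until an attribute is first accessed. A minimal sketch of the same idea with
# module-level __getattr__ (PEP 562), without transformers' _LazyModule; it
# assumes this file lives inside a package so the relative imports resolve.
import importlib

_LAZY_ATTRS = {"MvpModel": ".modeling_mvp", "MvpTokenizer": ".tokenization_mvp"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")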
| 310
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level BPE
# and get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
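# GPT-2 ships without a pad token, which is why the padding tests above build
# the tokenizer with an explicit pad_token. A hedged sketch of the same setup
# against the public checkpoint (assumes network access to "gpt2"):
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token  # common workaround: reuse EOS for padding
out = tok(["short", "a noticeably longer input"], padding=True, return_tensors="np")
# The shorter row is filled with pad_token_id and its attention_mask holds 0s,
# matching the assertions in the tests above.
assert tok.pad_token_id in out["input_ids"][0]
assert 0 in out["attention_mask"][0]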
| 310
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = CpmAntTokenizer
SCREAMING_SNAKE_CASE__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
super().setUp()
_UpperCamelCase : str = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : Tuple = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
_UpperCamelCase : Union[str, Any] = "今天天气真好!"
_UpperCamelCase : Optional[int] = ["今天", "天气", "真", "好", "!"]
_UpperCamelCase : List[Any] = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Dict = "今天天气真好!"
_UpperCamelCase : str = [tokenizer.bos_token] + tokens
_UpperCamelCase : Tuple = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
_UpperCamelCase : Optional[int] = tokenizer.decode(__a )
self.assertEqual(__a , __a )
| 310
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = generate_pascal_triangle(lowercase_ )
for row_idx in range(lowercase_ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=" " )
else:
print(triangle[row_idx][col_idx] ,end="" )
print()
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_UpperCamelCase : list[list[int]] = []
for current_row_idx in range(lowercase_ ):
_UpperCamelCase : Any = populate_current_row(lowercase_ ,lowercase_ )
triangle.append(lowercase_ )
return triangle
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[int]:
"""simple docstring"""
_UpperCamelCase : str = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = 1, 1
for current_col_idx in range(1 ,lowercase_ ):
calculate_current_element(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
return current_row
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,) -> None:
"""simple docstring"""
_UpperCamelCase : Tuple = triangle[current_row_idx - 1][current_col_idx - 1]
_UpperCamelCase : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx]
_UpperCamelCase : str = above_to_left_elt + above_to_right_elt
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_UpperCamelCase : list[list[int]] = [[1]]
for row_index in range(1 ,lowercase_ ):
_UpperCamelCase : Tuple = [0] + result[-1] + [0]
_UpperCamelCase : Union[str, Any] = row_index + 1
# Calculate the number of distinct elements in a row
_UpperCamelCase : List[Any] = sum(divmod(lowercase_ ,2 ) )
_UpperCamelCase : Optional[int] = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
_UpperCamelCase : Any = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_UpperCamelCase : int = row_first_half + row_second_half
result.append(lowercase_ )
return result
def lowercase__ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase_ ,lowercase_ ) -> None:
_UpperCamelCase : int = F'''{func.__name__}({value})'''
_UpperCamelCase : Optional[int] = timeit(F'''__main__.{call}''' ,setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowercase_ ,lowercase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
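# Both generators above build each row from the previous one. For a single row
# the multiplicative identity C(n, k) = C(n, k - 1) * (n - k + 1) / k yields it
# directly, without materialising the whole triangle:
def pascal_row(n: int) -> list:
    row = [1]
    for k in range(1, n + 1):
        row.append(row[-1] * (n - k + 1) // k)
    return row

assert pascal_row(4) == [1, 4, 6, 4, 1]  # the triangle's fifth row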
| 310
|
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = [False] * len(lowercase_ )
_UpperCamelCase : Dict = [s]
_UpperCamelCase : List[str] = True
while queue:
_UpperCamelCase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase_ )
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[str] = u
return visited[t]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = [-1] * (len(lowercase_ ))
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ):
_UpperCamelCase : int = float("Inf" )
_UpperCamelCase : Optional[Any] = sink
while s != source:
# Find the minimum value in select path
_UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] )
_UpperCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_UpperCamelCase : Union[str, Any] = sink
while v != source:
_UpperCamelCase : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase : Dict = parent[v]
for i in range(len(lowercase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
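# The routine above had its variable names collapsed by the identifier
# rewriting. A readable reconstruction of the same approach: Edmonds-Karp
# (BFS-based Ford-Fulkerson) max-flow on the residual graph, after which the
# cut edges reported are the saturated edges that had positive capacity,
# mirroring the original's criterion.
from collections import deque

def mincut_readable(graph, source, sink):
    original = [row[:] for row in graph]  # keep the original capacities
    n = len(graph)

    def bfs(parent):
        visited = [False] * n
        visited[source] = True
        queue = deque([source])
        while queue:
            u = queue.popleft()
            for v in range(n):
                if not visited[v] and graph[u][v] > 0:
                    visited[v] = True
                    parent[v] = u
                    queue.append(v)
        return visited[sink]

    parent = [-1] * n
    while bfs(parent):
        path_flow = float("inf")
        v = sink
        while v != source:  # smallest residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # push the flow and update the residual graph
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = u
    return [(i, j) for i in range(n) for j in range(n) if graph[i][j] == 0 and original[i][j] > 0]

print(mincut_readable([row[:] for row in test_graph], source=0, sink=5))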
| 310
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = "xlm-roberta"
def __init__( self : Union[str, Any] , __a : List[Any]=3_0522 , __a : Union[str, Any]=768 , __a : List[Any]=12 , __a : Tuple=12 , __a : Dict=3072 , __a : Tuple="gelu" , __a : Dict=0.1 , __a : Any=0.1 , __a : Optional[int]=512 , __a : Optional[int]=2 , __a : List[Any]=0.02 , __a : str=1e-1_2 , __a : int=1 , __a : Tuple=0 , __a : Optional[int]=2 , __a : Tuple="absolute" , __a : Union[str, Any]=True , __a : Any=None , **__a : Any , ) -> Optional[int]:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : Union[str, Any] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCamelCase : List[Any] = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Union[str, Any] = position_embedding_type
_UpperCamelCase : int = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 310
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : str = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : int = crop_size
_UpperCamelCase : Optional[Any] = resample
_UpperCamelCase : Dict = do_rescale
_UpperCamelCase : Any = rescale_factor
_UpperCamelCase : Any = offset
_UpperCamelCase : Union[str, Any] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
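# --- Illustrative sketch (not part of the original file) ---------------------
# The pipeline above applies resize -> center-crop -> rescale (optionally with
# an offset toward zero) -> normalize to every frame of every video. Below is a
# self-contained per-frame version; the function name, the defaults, and the
# exact offset arithmetic are assumptions for illustration, not a transcription
# of the methods above.
import numpy as np

def sketch_preprocess_frame(frame, scale=1 / 127.5, offset=True, mean=0.0, std=1.0):
    """Rescale a uint8 frame into [0, 2], optionally offset into [-1, 1], then normalize."""
    frame = frame.astype(np.float32) * scale
    if offset:
        frame = frame - 1.0
    return (frame - mean) / std

# e.g. sketch_preprocess_frame(np.zeros((224, 224, 3), dtype=np.uint8)) returns all -1.0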
| 310
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 310
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_UpperCamelCase : Any = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
            assert isinstance(lowercase_ ,torch.Tensor ), type(lowercase_ )
_UpperCamelCase : Optional[Any] = v
return r
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device )
_UpperCamelCase : str = in_tensor.numpy()
_UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
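    # The assert fires on an element-wise mismatch; if the tensors agree, the
    # final raise below intentionally halts execution after reporting success.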
assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = urlparse(lowercase_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str:
"""simple docstring"""
_UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCamelCase : List[str] = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ ,lowercase_ ):
ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ ,lowercase_ ):
ua += "; " + user_agent
_UpperCamelCase : Any = {"user-agent": ua}
if resume_size > 0:
_UpperCamelCase : str = "bytes=%d-" % (resume_size,)
_UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
_UpperCamelCase : List[str] = response.headers.get("Content-Length" )
_UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None
_UpperCamelCase : Optional[int] = tqdm(
unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
    # etag is None means no connection, the url doesn't exist, or it is otherwise
    # inaccessible; fall back to the most recently downloaded copy, if any.
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,)
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
def lowercase__ ( lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = url.encode("utf-8" )
_UpperCamelCase : List[str] = shaaaa(lowercase_ )
_UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
_UpperCamelCase : Optional[Any] = etag.encode("utf-8" )
_UpperCamelCase : Optional[Any] = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
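            # NOTE: eval() executes the file contents as Python; this assumes a
            # trusted local file.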
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
            _UpperCamelCase : Optional[int] = req.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
def lowercase__ ( ) -> Dict:
"""simple docstring"""
    print(F'''{os.path.abspath(os.path.join(PATH ,os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
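# The generator above yields consecutive slices: with batch=2, [a, b, c] is
# chunked into [a, b] and then [c].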
| 310
| 1
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase__ = 6_3_7_8_1_3_7.0
lowerCamelCase__ = 6_3_5_6_7_5_2.3_1_4_2_4_5
lowerCamelCase__ = 637_8137
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_UpperCamelCase : Optional[Any] = atan((1 - flattening) * tan(radians(lowercase_ ) ) )
_UpperCamelCase : Any = atan((1 - flattening) * tan(radians(lowercase_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_UpperCamelCase : Optional[Any] = haversine_distance(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
    _UpperCamelCase : int = (b_lat1 + b_lat2) / 2
    _UpperCamelCase : Union[str, Any] = (b_lat2 - b_lat1) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_UpperCamelCase : Union[str, Any] = (sin(lowercase_ ) ** 2) * (cos(lowercase_ ) ** 2)
_UpperCamelCase : Optional[Any] = cos(sigma / 2 ) ** 2
    _UpperCamelCase : Any = (sigma - sin(lowercase_ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_UpperCamelCase : Union[str, Any] = (cos(lowercase_ ) ** 2) * (sin(lowercase_ ) ** 2)
_UpperCamelCase : Union[str, Any] = sin(sigma / 2 ) ** 2
_UpperCamelCase : List[str] = (sigma + sin(lowercase_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
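# Illustrative call (function names are the obfuscated ones used in this file):
#   lowercase__(37.774856, -122.424227, 37.864742, -119.537521)
# returns the ellipsoidal distance in meters between San Francisco and Yosemite,
# roughly 250 km; the exact value depends on the haversine helper.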
| 310
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
        super().__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
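        # For each query, score its token embeddings against the start/end token
        # embeddings of its support examples, then softmax over positions to get
        # start/end probability distributions.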
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
| 310
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
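# Example: for [2, 2, 3, 3, 4] the per-element counts are [2, 2, 2, 2, 1],
# so both 2 and 3 reach the maximum count and the result is [2, 3].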
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=__a ).to(__a )
_UpperCamelCase : int = AutoTokenizer.from_pretrained("google/mt5-small" )
_UpperCamelCase : Tuple = tokenizer("Hello there" , return_tensors="pt" ).input_ids
_UpperCamelCase : List[str] = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
_UpperCamelCase : Union[str, Any] = model(input_ids.to(__a ) , labels=labels.to(__a ) ).loss
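        # `loss` is the mean per-token NLL, so scaling by the label length and
        # negating recovers the sequence log-likelihood that EXPECTED_SCORE is in.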
_UpperCamelCase : Tuple = -(labels.shape[-1] * loss.item())
_UpperCamelCase : Tuple = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 310
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
_UpperCamelCase : Optional[int] = str(lowercase_ )
return n == n[::-1]
def lowercase__ ( lowercase_ = 1_000_000 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = 0
for i in range(1 ,lowercase_ ):
if is_palindrome(lowercase_ ) and is_palindrome(bin(lowercase_ ).split("b" )[1] ):
total += i
return total
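# Example: 585 is a palindrome in base 10 and 0b1001001001 (= 585) in base 2,
# so it contributes to the total.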
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 310
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
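# A hedged end-to-end sketch of the streaming pattern the tests above
# exercise: run `generate` on a background thread and consume decoded text
# as it is produced. The tiny checkpoint matches the one used in the tests;
# the prompt and token budget are illustrative assumptions.
def _streaming_demo():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for new_text in streamer:  # yields decoded text pieces until generation finishes
        print(new_text, end="")
    thread.join()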
| 310
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "sentencepiece.bpe.model"}
lowerCamelCase__ = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
lowerCamelCase__ = {
"camembert-base": 512,
}
lowerCamelCase__ = "▁"
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ :str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ :int = ["input_ids", "attention_mask"]
def __init__( self : Dict , __a : int , __a : Optional[Any]="<s>" , __a : int="</s>" , __a : Tuple="</s>" , __a : Optional[int]="<s>" , __a : str="<unk>" , __a : Dict="<pad>" , __a : List[str]="<mask>" , __a : int=["<s>NOTUSED", "</s>NOTUSED"] , __a : Optional[Dict[str, Any]] = None , **__a : Optional[int] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
_UpperCamelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
_UpperCamelCase : List[Any] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
    # sentencepiece vocabulary (this is the case for <s> and </s>)
_UpperCamelCase : Optional[Any] = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_UpperCamelCase : Optional[int] = len(self.fairseq_tokens_to_ids )
_UpperCamelCase : Tuple = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_UpperCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : str = [self.cls_token_id]
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase : List[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
_UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[str] ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[Any] ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
_UpperCamelCase : List[str] = True
_UpperCamelCase : Dict = []
else:
current_sub_tokens.append(__a )
_UpperCamelCase : Union[str, Any] = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __getstate__( self : List[Any] ) -> Dict:
_UpperCamelCase : Any = self.__dict__.copy()
_UpperCamelCase : int = None
return state
def __setstate__( self : Optional[int] , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCamelCase : Any = {}
_UpperCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase : Union[str, Any] = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
_UpperCamelCase : int = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
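# The id scheme above prepends four fairseq control tokens to the
# sentencepiece vocabulary. A standalone sketch of that offset logic
# (the constant and helper names are illustrative, not part of the class):
_FAIRSEQ_TOKENS = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_FAIRSEQ_OFFSET = len(_FAIRSEQ_TOKENS)
def _token_to_id(token: str, sp_piece_id: int) -> int:
    # sp_piece_id is what sentencepiece's PieceToId returns; 0 means unknown
    if token in _FAIRSEQ_TOKENS:
        return _FAIRSEQ_TOKENS[token]
    if sp_piece_id == 0:
        return _FAIRSEQ_TOKENS["<unk>"]
    return _FAIRSEQ_OFFSET + sp_piece_id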
| 310
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
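# The conversion above grows embedding matrices by copying existing rows for
# newly added special tokens. The pattern in isolation (the shapes and the
# chosen row index are illustrative assumptions):
_demo_emb = torch.randn(100, 768)             # (vocab_size, hidden_size)
_new_row = _demo_emb[5].unsqueeze(0)          # reuse an existing row as the init
_demo_emb = torch.cat([_demo_emb, _new_row])  # vocab_size: 100 -> 101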
| 310
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Tuple = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_UpperCamelCase : List[Any] = 128
elif "12-12" in model_name:
_UpperCamelCase : Optional[int] = 12
_UpperCamelCase : str = 12
elif "14-14" in model_name:
_UpperCamelCase : Optional[Any] = 14
_UpperCamelCase : Tuple = 14
elif "16-16" in model_name:
_UpperCamelCase : Dict = 16
_UpperCamelCase : Dict = 16
else:
raise ValueError("Model not supported" )
_UpperCamelCase : Optional[Any] = "huggingface/label-files"
if "speech-commands" in model_name:
_UpperCamelCase : Any = 35
_UpperCamelCase : Optional[Any] = "speech-commands-v2-id2label.json"
else:
_UpperCamelCase : List[Any] = 527
_UpperCamelCase : Optional[int] = "audioset-id2label.json"
_UpperCamelCase : Union[str, Any] = json.load(open(hf_hub_download(lowercase_ ,lowercase_ ,repo_type="dataset" ) ,"r" ) )
_UpperCamelCase : Union[str, Any] = {int(lowercase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = idalabel
_UpperCamelCase : int = {v: k for k, v in idalabel.items()}
return config
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
if "module.v" in name:
_UpperCamelCase : Tuple = name.replace("module.v" ,"audio_spectrogram_transformer" )
if "cls_token" in name:
_UpperCamelCase : Any = name.replace("cls_token" ,"embeddings.cls_token" )
if "dist_token" in name:
_UpperCamelCase : Optional[Any] = name.replace("dist_token" ,"embeddings.distillation_token" )
if "pos_embed" in name:
_UpperCamelCase : Union[str, Any] = name.replace("pos_embed" ,"embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_UpperCamelCase : Dict = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
_UpperCamelCase : Optional[int] = name.replace("blocks" ,"encoder.layer" )
if "attn.proj" in name:
_UpperCamelCase : List[str] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
_UpperCamelCase : int = name.replace("attn" ,"attention.self" )
if "norm1" in name:
_UpperCamelCase : List[Any] = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
_UpperCamelCase : int = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
_UpperCamelCase : Tuple = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
_UpperCamelCase : Any = name.replace("mlp.fc2" ,"output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_UpperCamelCase : int = name.replace("audio_spectrogram_transformer.norm" ,"audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
_UpperCamelCase : List[Any] = name.replace("module.mlp_head.0" ,"classifier.layernorm" )
if "module.mlp_head.1" in name:
_UpperCamelCase : Dict = name.replace("module.mlp_head.1" ,"classifier.dense" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCamelCase : List[str] = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
_UpperCamelCase : str = key.split("." )
_UpperCamelCase : int = int(key_split[3] )
_UpperCamelCase : Optional[Any] = config.hidden_size
if "weight" in key:
_UpperCamelCase : Union[str, Any] = val[:dim, :]
_UpperCamelCase : Optional[Any] = val[dim : dim * 2, :]
_UpperCamelCase : Any = val[-dim:, :]
else:
_UpperCamelCase : str = val[:dim]
_UpperCamelCase : str = val[dim : dim * 2]
_UpperCamelCase : int = val[-dim:]
else:
_UpperCamelCase : List[str] = val
return orig_state_dict
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Any = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(lowercase_ ,lowercase_ )
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=False ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Tuple = get_audio_spectrogram_transformer_config(lowercase_ )
_UpperCamelCase : Optional[int] = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
_UpperCamelCase : Dict = model_name_to_url[model_name]
_UpperCamelCase : Dict = torch.hub.load_state_dict_from_url(lowercase_ ,map_location="cpu" )
# remove some keys
remove_keys(lowercase_ )
# rename some keys
_UpperCamelCase : Optional[Any] = convert_state_dict(lowercase_ ,lowercase_ )
# load 🤗 model
_UpperCamelCase : List[str] = ASTForAudioClassification(lowercase_ )
model.eval()
model.load_state_dict(lowercase_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_UpperCamelCase : List[str] = -4.267_7393 if "speech-commands" not in model_name else -6.84_5978
_UpperCamelCase : Any = 4.568_9974 if "speech-commands" not in model_name else 5.565_4526
_UpperCamelCase : Any = 1_024 if "speech-commands" not in model_name else 128
_UpperCamelCase : Union[str, Any] = ASTFeatureExtractor(mean=lowercase_ ,std=lowercase_ ,max_length=lowercase_ )
if "speech-commands" in model_name:
_UpperCamelCase : int = load_dataset("speech_commands" ,"v0.02" ,split="validation" )
_UpperCamelCase : Dict = dataset[0]["audio"]["array"]
else:
_UpperCamelCase : str = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" ,filename="sample_audio.flac" ,repo_type="dataset" ,)
_UpperCamelCase, _UpperCamelCase : Dict = torchaudio.load(lowercase_ )
_UpperCamelCase : int = waveform.squeeze().numpy()
_UpperCamelCase : Any = feature_extractor(lowercase_ ,sampling_rate=16_000 ,return_tensors="pt" )
# forward pass
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : Any = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_UpperCamelCase : Dict = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_UpperCamelCase : List[Any] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_UpperCamelCase : List[str] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_UpperCamelCase : List[str] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_UpperCamelCase : Tuple = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_UpperCamelCase : Optional[Any] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_UpperCamelCase : Optional[int] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
_UpperCamelCase : Union[str, Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(lowercase_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
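# convert_state_dict above splits each fused `qkv` projection into separate
# query/key/value tensors. The slicing pattern in isolation (the 768 hidden
# size is an illustrative assumption):
_dim = 768
_qkv = torch.randn(3 * _dim, _dim)
_query, _key, _value = _qkv[:_dim], _qkv[_dim : 2 * _dim], _qkv[-_dim:]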
| 310
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
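# The metric class wraps nltk's corpus_gleu directly; calling it without the
# datasets plumbing looks like this (the token lists are illustrative):
_hypotheses = [["the", "cat", "sat"]]
_references = [[["the", "cat", "sat", "down"]]]
_score = gleu_score.corpus_gleu(list_of_references=_references, hypotheses=_hypotheses)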
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    '''simple docstring'''

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
lowerCamelCase__ = "ABAABA"
lowerCamelCase__ = "AB"
lowerCamelCase__ = BoyerMooreSearch(text, pattern)
lowerCamelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
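# Note: reassigning `i` inside the `for` loop of bad_character_heuristic has
# no effect on the iteration, so the computed shift is never actually
# applied. A hedged while-loop variant that does apply the jump:
def bad_character_search(text: str, pattern: str) -> list[int]:
    positions = []
    m, n = len(pattern), len(text)
    i = 0
    while i <= n - m:
        for j in range(m - 1, -1, -1):
            if text[i + j] != pattern[j]:
                last = pattern.rfind(text[i + j])  # -1 when the char is absent
                i += max(1, j - last)
                break
        else:
            positions.append(i)
            i += 1
    return positions
assert bad_character_search("ABAABA", "AB") == [0, 3]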
| 310
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
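# Usage sketch for ind_reactance above: pass 0 for exactly one quantity in
# X_L = 2 * pi * f * L and the function solves for it (values illustrative).
_result = ind_reactance(inductance=0, frequency=60, reactance=100)
assert abs(_result["inductance"] - 100 / (2 * pi * 60)) < 1e-12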
| 310
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = "wav2vec2"
def __init__( self : str , __a : str=32 , __a : int=768 , __a : str=12 , __a : List[Any]=12 , __a : List[Any]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : Any=0.1 , __a : Optional[int]=0.1 , __a : List[Any]=0.0 , __a : Optional[Any]=0.0 , __a : str=0.1 , __a : List[str]=0.1 , __a : Any=0.02 , __a : Optional[int]=1e-5 , __a : List[str]="group" , __a : str="gelu" , __a : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __a : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __a : Dict=(10, 3, 3, 3, 3, 2, 2) , __a : Optional[int]=False , __a : List[Any]=128 , __a : List[str]=16 , __a : str=False , __a : Optional[Any]=True , __a : Union[str, Any]=0.05 , __a : Dict=10 , __a : Tuple=2 , __a : Any=0.0 , __a : Optional[Any]=10 , __a : Union[str, Any]=0 , __a : List[Any]=320 , __a : Any=2 , __a : List[Any]=0.1 , __a : Dict=100 , __a : Any=256 , __a : Optional[Any]=256 , __a : str=0.1 , __a : Any="sum" , __a : Optional[Any]=False , __a : int=False , __a : int=256 , __a : Dict=(512, 512, 512, 512, 1500) , __a : Optional[Any]=(5, 3, 3, 1, 1) , __a : List[Any]=(1, 2, 3, 1, 1) , __a : List[str]=512 , __a : List[Any]=0 , __a : Tuple=1 , __a : int=2 , __a : List[Any]=False , __a : List[str]=3 , __a : Dict=2 , __a : str=3 , __a : Tuple=None , __a : Tuple=None , **__a : Dict , ) -> List[Any]:
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = feat_extract_norm
_UpperCamelCase : Optional[int] = feat_extract_activation
_UpperCamelCase : Optional[int] = list(__a )
_UpperCamelCase : str = list(__a )
_UpperCamelCase : List[Any] = list(__a )
_UpperCamelCase : List[Any] = conv_bias
_UpperCamelCase : str = num_conv_pos_embeddings
_UpperCamelCase : Any = num_conv_pos_embedding_groups
_UpperCamelCase : Union[str, Any] = len(self.conv_dim )
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : Tuple = hidden_dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : Dict = activation_dropout
_UpperCamelCase : Any = feat_proj_dropout
_UpperCamelCase : List[Any] = final_dropout
_UpperCamelCase : Dict = layerdrop
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : int = do_stable_layer_norm
_UpperCamelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : Optional[int] = apply_spec_augment
_UpperCamelCase : List[str] = mask_time_prob
_UpperCamelCase : List[Any] = mask_time_length
_UpperCamelCase : Dict = mask_time_min_masks
_UpperCamelCase : str = mask_feature_prob
_UpperCamelCase : Optional[Any] = mask_feature_length
_UpperCamelCase : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCamelCase : Dict = num_codevectors_per_group
_UpperCamelCase : List[str] = num_codevector_groups
_UpperCamelCase : Union[str, Any] = contrastive_logits_temperature
_UpperCamelCase : List[Any] = feat_quantizer_dropout
_UpperCamelCase : Dict = num_negatives
_UpperCamelCase : Optional[int] = codevector_dim
_UpperCamelCase : Union[str, Any] = proj_codevector_dim
_UpperCamelCase : Tuple = diversity_loss_weight
# ctc loss
_UpperCamelCase : Union[str, Any] = ctc_loss_reduction
_UpperCamelCase : List[Any] = ctc_zero_infinity
# adapter
_UpperCamelCase : str = add_adapter
_UpperCamelCase : Optional[Any] = adapter_kernel_size
_UpperCamelCase : Optional[int] = adapter_stride
_UpperCamelCase : int = num_adapter_layers
_UpperCamelCase : List[Any] = output_hidden_size or hidden_size
_UpperCamelCase : Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : Union[str, Any] = list(__a )
_UpperCamelCase : Any = list(__a )
_UpperCamelCase : Dict = list(__a )
_UpperCamelCase : Tuple = xvector_output_dim
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
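# The property above multiplies the conv strides together: with the default
# strides (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by
# 5 * 2**6 = 320 samples per output frame. The same reduction in isolation:
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320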
| 310
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
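# Usage sketch for the helpers above (bucket and paths are illustrative):
assert extract_path_from_uri("s3://my-bucket/train") == "my-bucket/train"
assert extract_path_from_uri("/tmp/train") == "/tmp/train"
assert is_remote_filesystem(None) is False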
| 310
| 1
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = (KDPMaDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ :int = 10
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Dict ) -> int:
_UpperCamelCase : Tuple = {
"num_train_timesteps": 1100,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__a )
return config
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
_UpperCamelCase : Tuple = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="v_prediction" )
_UpperCamelCase : List[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase : Any = self.dummy_model()
_UpperCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase : List[str] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase : List[str] = scheduler.scale_model_input(__a , __a )
_UpperCamelCase : str = model(__a , __a )
_UpperCamelCase : Optional[int] = scheduler.step(__a , __a , __a )
_UpperCamelCase : Tuple = output.prev_sample
_UpperCamelCase : int = torch.sum(torch.abs(__a ) )
_UpperCamelCase : List[Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
if torch_device == "mps":
return
_UpperCamelCase : Tuple = self.scheduler_classes[0]
_UpperCamelCase : int = self.get_scheduler_config()
_UpperCamelCase : str = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase : List[str] = self.dummy_model()
_UpperCamelCase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
_UpperCamelCase : str = model(__a , __a )
_UpperCamelCase : Any = scheduler.step(__a , __a , __a )
_UpperCamelCase : int = output.prev_sample
_UpperCamelCase : Tuple = torch.sum(torch.abs(__a ) )
_UpperCamelCase : Any = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
if torch_device == "mps":
return
_UpperCamelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCamelCase : List[str] = self.get_scheduler_config()
_UpperCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
_UpperCamelCase : Union[str, Any] = self.dummy_model()
_UpperCamelCase : Dict = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase : Dict = scheduler.scale_model_input(__a , __a )
_UpperCamelCase : Optional[Any] = model(__a , __a )
_UpperCamelCase : Union[str, Any] = scheduler.step(__a , __a , __a )
_UpperCamelCase : Optional[int] = output.prev_sample
_UpperCamelCase : int = torch.sum(torch.abs(__a ) )
_UpperCamelCase : str = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
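# The three tests above share one denoising skeleton. A minimal runnable
# version of that loop against upstream diffusers (note: this file imports
# the class as KDPMaDiscreteScheduler; the upstream name is
# KDPM2DiscreteScheduler, and the zero tensor stands in for a real model):
def _denoise_demo():
    from diffusers import KDPM2DiscreteScheduler
    scheduler = KDPM2DiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample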
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
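# A generic miniature of the optional-dependency guard used above (module
# and class names here are hypothetical): try the import, and expose a dummy
# stand-in that raises only when instantiated.
try:
    import some_optional_dependency  # noqa: F401  (hypothetical module)
except ImportError:
    class SomePipeline:  # mirrors the dummy_torch_and_transformers_objects pattern
        def __init__(self, *args, **kwargs):
            raise ImportError("SomePipeline requires `some_optional_dependency`.")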
| 310
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCamelCase__ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :bool = field(default=_UpperCamelCase , metadata={"help": "Whether to use SortishSampler or not."} )
SCREAMING_SNAKE_CASE__ :bool = field(
default=_UpperCamelCase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
SCREAMING_SNAKE_CASE__ :Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
SCREAMING_SNAKE_CASE__ :Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
SCREAMING_SNAKE_CASE__ :Optional[Union[str, Path, GenerationConfig]] = field(
default=_UpperCamelCase , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : Dict = super().to_dict()
for k, v in d.items():
if isinstance(__a , __a ):
_UpperCamelCase : Tuple = v.to_dict()
return d
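# The to_dict override above flattens a nested GenerationConfig so the
# arguments stay JSON-serializable. The same idea on plain stand-in classes
# (both class names are illustrative):
class _InnerConfig:
    def __init__(self, max_length: int = 20):
        self.max_length = max_length
    def to_dict(self):
        return {"max_length": self.max_length}
class _OuterArgs:
    def __init__(self, generation_config=None):
        self.generation_config = generation_config
    def to_dict(self):
        d = dict(self.__dict__)
        for k, v in d.items():
            if hasattr(v, "to_dict"):
                d[k] = v.to_dict()
        return d
assert _OuterArgs(_InnerConfig()).to_dict() == {"generation_config": {"max_length": 20}}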
| 310
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
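# The query handling above in isolation: `quote` (imported at the top of
# this file) percent-encodes raw user input for the URL (text illustrative).
assert quote("neural networks") == "neural%20networks"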
| 310
| 1
|
"""simple docstring"""
import random
class Onepad:
    '''simple docstring'''

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
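# Round-trip property of the pad above: c = (p + k) * k, so
# p = (c - k * k) // k recovers each character exactly for any key k >= 1.
_c, _k = Onepad().encrypt("round trip")
assert Onepad().decrypt(_c, _k) == "round trip"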
| 310
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
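# The ONNX inputs mapping above resolves, for the default task, to named
# dynamic axes like this (a plain illustration of the returned structure):
_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_inputs = OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])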
| 310
| 1
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_UpperCamelCase : Any = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
            assert isinstance(lowercase_ ,torch.Tensor ), type(lowercase_ )
_UpperCamelCase : Optional[Any] = v
return r
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
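        # Dotted keys such as "model.dim" are routed to the nested Config object
        # they address; only the leaf level stores the actual value.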
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device )
_UpperCamelCase : str = in_tensor.numpy()
_UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = urlparse(lowercase_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str:
"""simple docstring"""
_UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCamelCase : List[str] = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ ,lowercase_ ):
ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ ,lowercase_ ):
ua += "; " + user_agent
_UpperCamelCase : Any = {"user-agent": ua}
if resume_size > 0:
_UpperCamelCase : str = "bytes=%d-" % (resume_size,)
_UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
_UpperCamelCase : List[str] = response.headers.get("Content-Length" )
_UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None
_UpperCamelCase : Optional[int] = tqdm(
unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,)
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
def lowercase__ ( lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = url.encode("utf-8" )
_UpperCamelCase : List[str] = shaaaa(lowercase_ )
_UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
_UpperCamelCase : Optional[Any] = etag.encode("utf-8" )
_UpperCamelCase : Optional[Any] = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
            _UpperCamelCase : Optional[int] = req.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
def lowercase__ ( ) -> Dict:
"""simple docstring"""
print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
| 310
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 310
| 1
|
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = 0
SCREAMING_SNAKE_CASE__ :bool = False
SCREAMING_SNAKE_CASE__ :float = 3.0
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=__a ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
_UpperCamelCase : Dict = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_UpperCamelCase : int = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_UpperCamelCase : Tuple = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , __a )
@require_multi_gpu
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
_UpperCamelCase : Optional[int] = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__a , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase__ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCamelCase__ = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCamelCase__ = torch.nn.Linear(100, 200)
lowerCamelCase__ = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCamelCase__ = ""
lowerCamelCase__ = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=__a ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 1
|
"""simple docstring"""
import os
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
    with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
_UpperCamelCase : str = str(file.readlines()[0] )
_UpperCamelCase : Union[str, Any] = names.replace("\"" ,"" ).split("," )
names.sort()
_UpperCamelCase : Dict = 0
_UpperCamelCase : Union[str, Any] = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        _UpperCamelCase : List[Any] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 310
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : Any = _sin / (2 * q_factor)
_UpperCamelCase : str = (1 - _cos) / 2
_UpperCamelCase : Any = 1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : List[str] = -2 * _cos
_UpperCamelCase : Tuple = 1 - alpha
_UpperCamelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : List[str] = tau * frequency / samplerate
_UpperCamelCase : str = sin(lowercase_ )
_UpperCamelCase : Optional[Any] = cos(lowercase_ )
_UpperCamelCase : Dict = _sin / (2 * q_factor)
_UpperCamelCase : List[Any] = (1 + _cos) / 2
_UpperCamelCase : Optional[int] = -1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : str = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Tuple = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Dict = _sin / 2
_UpperCamelCase : int = 0
_UpperCamelCase : str = -ba
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : Optional[int] = -2 * _cos
_UpperCamelCase : Optional[Any] = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : str = tau * frequency / samplerate
_UpperCamelCase : Optional[Any] = sin(lowercase_ )
_UpperCamelCase : Optional[int] = cos(lowercase_ )
_UpperCamelCase : int = _sin / (2 * q_factor)
_UpperCamelCase : List[str] = 1 - alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : Union[str, Any] = 1 + alpha
_UpperCamelCase : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : int = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : List[Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Optional[int] = 10 ** (gain_db / 40)
_UpperCamelCase : str = 1 + alpha * big_a
_UpperCamelCase : Union[str, Any] = -2 * _cos
_UpperCamelCase : Optional[int] = 1 - alpha * big_a
_UpperCamelCase : int = 1 + alpha / big_a
_UpperCamelCase : Optional[Any] = -2 * _cos
_UpperCamelCase : Any = 1 - alpha / big_a
_UpperCamelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tau * frequency / samplerate
_UpperCamelCase : Any = sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40)
_UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : Any = big_a * (pmc + aaa)
_UpperCamelCase : Dict = 2 * big_a * mpc
_UpperCamelCase : str = big_a * (pmc - aaa)
_UpperCamelCase : Dict = ppmc + aaa
_UpperCamelCase : List[Any] = -2 * pmpc
_UpperCamelCase : Dict = ppmc - aaa
_UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : Any = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : str = 10 ** (gain_db / 40)
_UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : List[Any] = big_a * (ppmc + aaa)
_UpperCamelCase : Dict = -2 * big_a * pmpc
_UpperCamelCase : Dict = big_a * (ppmc - aaa)
_UpperCamelCase : Optional[Any] = pmc + aaa
_UpperCamelCase : Any = 2 * mpc
_UpperCamelCase : Any = pmc - aaa
_UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
| 310
| 1
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = AlbertConfig.from_json_file(lowercase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCamelCase : Optional[int] = AlbertForPreTraining(lowercase_ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase_ ,lowercase_ ,lowercase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 310
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 310
| 1
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = "The Nymphenburg Palace is a beautiful palace in Munich!"
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
_UpperCamelCase : Any = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_UpperCamelCase : int = BERTEncoder(
attention_cell=predefined_args["attention_cell"] ,num_layers=predefined_args["num_layers"] ,units=predefined_args["units"] ,hidden_size=predefined_args["hidden_size"] ,max_length=predefined_args["max_length"] ,num_heads=predefined_args["num_heads"] ,scaled=predefined_args["scaled"] ,dropout=predefined_args["dropout"] ,output_attention=lowercase_ ,output_all_encodings=lowercase_ ,use_residual=predefined_args["use_residual"] ,activation=predefined_args.get("activation" ,"gelu" ) ,layer_norm_eps=predefined_args.get("layer_norm_eps" ,lowercase_ ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_UpperCamelCase : int = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
_UpperCamelCase : Union[str, Any] = os.path.join(get_home_dir() ,"models" )
_UpperCamelCase : Optional[Any] = _load_vocab(lowercase_ ,lowercase_ ,lowercase_ ,cls=lowercase_ )
_UpperCamelCase : Any = nlp.model.BERTModel(
lowercase_ ,len(lowercase_ ) ,units=predefined_args["units"] ,embed_size=predefined_args["embed_size"] ,embed_dropout=predefined_args["embed_dropout"] ,word_embed=predefined_args["word_embed"] ,use_pooler=lowercase_ ,use_token_type_embed=lowercase_ ,token_type_vocab_size=predefined_args["token_type_vocab_size"] ,use_classifier=lowercase_ ,use_decoder=lowercase_ ,)
original_bort.load_parameters(lowercase_ ,cast_dtype=lowercase_ ,ignore_extra=lowercase_ )
_UpperCamelCase : Dict = original_bort._collect_params_with_prefix()
# Build our config 🤗
_UpperCamelCase : Optional[Any] = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowercase_ ),
}
_UpperCamelCase : int = BertConfig.from_dict(lowercase_ )
_UpperCamelCase : Dict = BertForMaskedLM(lowercase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param ,gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight ,"word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight ,"encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias ,"encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight ,"encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias ,F'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight ,F'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base" )
    input_ids = tokenizer.encode_plus(lowercase_ )["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids ,token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(lowercase_ )
    hf_bort_model = BertModel.from_pretrained(lowercase_ )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(lowercase_ ,return_tensors="pt" )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer ,hf_layer ,atol=1e-3 )
    if success:
        print("✔️ Both models output the same tensors" )
    else:
        print("❌ Both models do **NOT** output the same tensors" )
        print("Absolute difference is:" ,max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
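# A hypothetical invocation of this conversion script (the script file name and
# the paths below are illustrative, not taken from this repository):
#
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch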
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number ) -> int:
    """Return the value of the Möbius function mu(number)."""
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
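# Illustrative values computed by mobius() above, assuming prime_factors()
# returns the prime factors with multiplicity and is_square_free() checks a
# list for repeated elements:
#   mobius(10) == 1    # 10 = 2 * 5: square-free, even number of prime factors
#   mobius(30) == -1   # 30 = 2 * 3 * 5: square-free, odd number of prime factors
#   mobius(12) == 0    # 12 = 2**2 * 3: not square-free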
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs(self , *args , **kwargs ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_padding_if_pad_token_set_slow(self ):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding="max_length" , max_length=30 , return_tensors="np" )
        out_s2 = tokenizer(s2 , padding=True , truncate=True , return_tensors="np" )
        out_p = tokenizer(*p , padding="max_length" , max_length=60 , return_tensors="np" )
        out_p2 = tokenizer(p2 , padding=True , truncate=True , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0] )
        self.assertFalse(0 in out_s2["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1] )
        self.assertTrue(0 in out_s2["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0] )
        self.assertFalse(0 in out_p2["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1] )
        self.assertTrue(0 in out_p2["attention_mask"][1] )
    def test_add_bos_token_slow(self ):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_s2 = tokenizer(s2 )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2 ) )
    def test_padding_different_model_input_name(self ):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self ):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False , add_bos_token=True )]
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                seq_0 = "Encode this."
                seq_1 = "This one too please."
                encoded_sequence = tokenizer.encode(seq_0 , add_special_tokens=False )
                encoded_sequence += tokenizer.encode(seq_1 , add_special_tokens=False )
                encoded_sequence_dict = tokenizer.encode_plus(
                    seq_0 , seq_1 , add_special_tokens=True , return_special_tokens_mask=True , )
                input_ids = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask ) , len(input_ids ) )
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(input_ids )
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence , filtered_sequence )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase ):
    '''simple docstring'''
    def test_serialize_deserialize_fast_opt(self ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=True )
        text = "A photo of a cat"
        tokens = tokenizer.encode(
            text , )
        self.assertEqual(tokens , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("test_opt" )
        tokenizer = AutoTokenizer.from_pretrained("./test_opt" )
        tokens = tokenizer.encode(
            text , )
        self.assertEqual(tokens , [2, 250, 1345, 9, 10, 4758] )
    def test_fast_slow_equivalence(self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=True )
        text = "A photo of a cat"
        tokens = tokenizer.encode(
            text , )
        # Same as above
        self.assertEqual(tokens , [2, 250, 1345, 9, 10, 4758] )
    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def test_users_can_modify_bos(self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=True )
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        tokens = tokenizer.encode(
            text , )
        # We changed the bos token
        self.assertEqual(tokens , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("./tok" )
        tokenizer = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        tokens = tokenizer.encode(
            text , )
        self.assertEqual(tokens , [31957, 250, 1345, 9, 10, 4758] )
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Create an RSA key pair and write it to disk."""
    print("Making key files..." )
    make_key_files("rsa" ,1_024 )
    print("Key files generation successful." )
def generate_key(key_size ) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA public/private key pair."""
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        e = random.randrange(2 ** (key_size - 1) ,2 ** (key_size) )
        if cryptoMath.gcd(e ,(p - 1) * (q - 1) ) == 1:
            break
    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e ,(p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name ,key_size ) -> None:
    """Write the key pair to {name}_pubkey.txt and {name}_privkey.txt."""
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print("\nWARNING:" )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' ,"w" ) as out_file:
        out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' ,"w" ) as out_file:
        out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
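# Sketch of how a key pair produced by generate_key() would be used for a
# textbook RSA round trip (illustrative only; real deployments need a padding
# scheme such as OAEP):
#
#   public_key, private_key = generate_key(1_024)
#   n, e = public_key
#   _, d = private_key
#   message = 42                               # must satisfy message < n
#   ciphertext = pow(message, e, n)            # c = m^e mod n
#   assert pow(ciphertext, d, n) == message    # m = c^d mod n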
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase , ToolTesterMixin ):
    '''simple docstring'''
    def setUp(self ):
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )
    def test_exact_match_arg(self ):
        result = self.tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_arg_remote(self ):
        result = self.remote_tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_kwarg(self ):
        result = self.tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_kwarg_remote(self ):
        result = self.remote_tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float ,area: float ,distance: float ) -> dict[str, float]:
    """Solve the Casimir force equation for whichever argument is given as 0."""
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
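# Worked example (illustrative numbers): for two plates of area A = 4 m^2
# separated by d = 1 mm, the first branch above evaluates
#   F = (pi^2 * hbar * c * A) / (240 * d^4)
# so casimir_force(force=0, area=4, distance=1e-3) yields a force of roughly
# 5.2e-15 N.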
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph ,s ,t ,parent ) -> bool:
    """Breadth-first search that records an augmenting path in `parent`."""
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph ,source ,sink ):
    """Run Ford-Fulkerson (Edmonds-Karp) and return the saturated forward edges."""
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph ,source ,sink ,parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow ,graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
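# For the test network above (source 0, sink 5) the maximum flow value is 23.
# Note that mincut() returns every forward edge that ends up fully saturated
# by the final flow; that set always contains a minimum s-t cut, but may also
# include additional saturated edges.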
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["DPTFeatureExtractor"]
lowerCamelCase__ = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    """Normalize a video input into a batch: a list of videos, each a list of frames."""
    if isinstance(videos ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos ,(list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
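# Illustrative summary of the three accepted layouts (derived from the
# branches above):
#   a single image                      -> [[image]]
#   one video given as a list of frames -> [[frame_0, frame_1, ...]]
#   a batch of videos                   -> returned unchanged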
class VivitImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(
        self ,
        do_resize: bool = True ,
        size: Dict[str, int] = None ,
        resample: PILImageResampling = PILImageResampling.BILINEAR ,
        do_center_crop: bool = True ,
        crop_size: Dict[str, int] = None ,
        do_rescale: bool = True ,
        rescale_factor: Union[int, float] = 1 / 255 ,
        offset: bool = True ,
        do_normalize: bool = True ,
        image_mean: Optional[Union[float, List[float]]] = None ,
        image_std: Optional[Union[float, List[float]]] = None ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self ,
        image: np.ndarray ,
        size: Dict[str, int] ,
        resample: PILImageResampling = PILImageResampling.BILINEAR ,
        data_format: Optional[Union[str, ChannelDimension]] = None ,
        **kwargs ,
    ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(
        self ,
        image: np.ndarray ,
        size: Dict[str, int] ,
        data_format: Optional[Union[str, ChannelDimension]] = None ,
        **kwargs ,
    ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale(
        self ,
        image: np.ndarray ,
        scale: Union[int, float] ,
        offset: bool = True ,
        data_format: Optional[Union[str, ChannelDimension]] = None ,
        **kwargs ,
    ) -> np.ndarray:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(
        self ,
        image: np.ndarray ,
        mean: Union[float, List[float]] ,
        std: Union[float, List[float]] ,
        data_format: Optional[Union[str, ChannelDimension]] = None ,
        **kwargs ,
    ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image(
        self ,
        image: ImageInput ,
        do_resize: bool = None ,
        size: Dict[str, int] = None ,
        resample: PILImageResampling = None ,
        do_center_crop: bool = None ,
        crop_size: Dict[str, int] = None ,
        do_rescale: bool = None ,
        rescale_factor: float = None ,
        offset: bool = None ,
        do_normalize: bool = None ,
        image_mean: Optional[Union[float, List[float]]] = None ,
        image_std: Optional[Union[float, List[float]]] = None ,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST ,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess(
        self ,
        videos: ImageInput ,
        do_resize: bool = None ,
        size: Dict[str, int] = None ,
        resample: PILImageResampling = None ,
        do_center_crop: bool = None ,
        crop_size: Dict[str, int] = None ,
        do_rescale: bool = None ,
        rescale_factor: float = None ,
        offset: bool = None ,
        do_normalize: bool = None ,
        image_mean: Optional[Union[float, List[float]]] = None ,
        image_std: Optional[Union[float, List[float]]] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
        data_format: ChannelDimension = ChannelDimension.FIRST ,
        **kwargs ,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
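# Minimal usage sketch (assumes PIL or numpy frames; the class name follows
# the definition above):
#   processor = VivitImageProcessor()
#   batch = processor([[frame_0, frame_1]], return_tensors="np")
#   batch["pixel_values"]  # (num_videos, num_frames, channels, height, width)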
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    """Compile and import the deformable-attention CUDA kernels."""
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu" ,"ms_deform_attn_cpu.cpp" ),
            os.path.join("cuda" ,"ms_deform_attn_cuda.cu" ),
        ]
    ]
    load(
        "MultiScaleDeformableAttention" ,src_files ,with_cuda=True ,extra_include_paths=[str(root )] ,extra_cflags=["-DWITH_CUDA=1"] ,extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ] ,)
import MultiScaleDeformableAttention as MSDA
return MSDA
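# Usage sketch (assumes a working CUDA toolchain; torch.utils.cpp_extension
# compiles the kernels on first call and caches the built extension):
#   MSDA = load_cuda_kernels()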
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva  # OpenCV
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def load_labels(objs=OBJECTS ,attrs=ATTRIBUTES ):
    """Load the object and attribute label lists from disk."""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
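# The two label files are expected to hold one record per line, with optional
# comma-separated aliases; only the first field is kept. Illustrative
# objects.txt contents (hypothetical, not the shipped file):
#   person,people
#   tree
#   car,cars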
def load_checkpoint(ckp_path ):
    """Load a pickled checkpoint and convert its arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path ,"rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v ,np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v ,torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    '''simple docstring'''
    _pointer = {}
    def __init__( self , dictionary: dict , name: str = "root" , level=0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
    def __repr__( self ):
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split("." )[-1]] = val
        levels = key.split("." )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , ".".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ):
        return self._pointer
    def dump_yaml( self , data , file_name ):
        with open(F'''{file_name}''' , "w" ) as stream:
            dump(data , stream )
    def dump_json( self , data , file_name ):
        with open(F'''{file_name}''' , "w" ) as stream:
            json.dump(data , stream )
    @staticmethod
    def load_yaml(config_name ):
        with open(config_name ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = " "
        if self._name != "root":
            r = F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'''
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: str , **kwargs ):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
    @classmethod
    def get_config_dict( cls , pretrained_model_name_or_path: str , **kwargs ):
        cache_dir = kwargs.pop("cache_dir" , None )
        force_download = kwargs.pop("force_download" , False )
        resume_download = kwargs.pop("resume_download" , False )
        proxies = kwargs.pop("proxies" , None )
        local_files_only = kwargs.pop("local_files_only" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare(in_tensor ):
    """Compare a tensor against the dumped reference tensor in dump.pt."""
    out_tensor = torch.load("dump.pt" ,map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape ,n1[0, 0, :5] )
    print(n2.shape ,n2[0, 0, :5] )
    assert np.allclose(n1 ,n2 ,rtol=0.01 ,atol=0.1 ), (
        F'''{sum([1 for x in np.isclose(n1 ,n2 ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(n1.flatten() )*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url(url_or_filename ) -> bool:
    """simple docstring"""
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id ,filename ,use_cdn=True ) -> str:
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
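# Illustrative outputs, derived from the two branches above:
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin")
#     -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"
#   hf_bucket_url("user/model", "pytorch_model.bin", use_cdn=False)
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/pytorch_model.bin"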
def http_get(url ,temp_file ,proxies=None ,resume_size=0 ,user_agent=None ,) -> None:
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent ,dict ):
        ua += "; " + "; ".join("{}/{}".format(k ,v ) for k, v in user_agent.items() )
    elif isinstance(user_agent ,str ):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url ,stream=True ,proxies=proxies ,headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="B" ,unit_scale=True ,total=total ,initial=resume_size ,desc="Downloading" ,)
    for chunk in response.iter_content(chunk_size=1_024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" % (url ,temp_file.name ) )
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
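# Cache layout used above: each entry lives at url_to_filename(url, etag) inside cache_dir, is
# guarded by a "<name>.lock" file during download, staged as "<name>.incomplete" when resuming,
# and described by a "<name>.json" sidecar recording the url and etag.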
def lowercase__ ( lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = url.encode("utf-8" )
_UpperCamelCase : List[str] = shaaaa(lowercase_ )
_UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
_UpperCamelCase : Optional[Any] = etag.encode("utf-8" )
_UpperCamelCase : Optional[Any] = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
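# Note on the helper above: the cache filename is the SHA-256 hex digest of the url (`shaaaa` is
# `hashlib.sha256` under this file's renaming), optionally extended with "." plus the digest of the
# etag so different revisions never collide; a trailing ".h5" is preserved for TF weight files.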
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
_UpperCamelCase : Optional[int] = req.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
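# Conversion note: the loader above maps every array through torch.from_numpy and, for each
# BatchNorm "running_var" key, also adds a zeroed "num_batches_tracked" companion entry so the
# state dict matches the layout torch's BatchNorm layers expect.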
def lowercase__ ( ) -> Dict:
"""simple docstring"""
print(F'''{os.path.abspath(os.path.join(os.path.dirname(__file__ ) ,os.pardir ) )}/demo.ipynb''' ) # assumes demo.ipynb sits one directory above this module
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
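# Example for the generator above (illustrative values): images=list(range(5)) with batch=2
# yields [0, 1], then [2, 3], then [4].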
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("T")
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :deque[T] # Cache store of keys
SCREAMING_SNAKE_CASE__ :set[T] # References of the keys in cache
SCREAMING_SNAKE_CASE__ :int = 10 # Maximum capacity of cache
def __init__( self : Tuple , __a : int ) -> None:
_UpperCamelCase : Tuple = deque()
_UpperCamelCase : str = set()
if not n:
_UpperCamelCase : int = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
_UpperCamelCase : List[str] = n
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_UpperCamelCase : Union[str, Any] = self.dq_store.pop()
self.key_reference.remove(__a )
else:
self.dq_store.remove(__a )
self.dq_store.appendleft(__a )
self.key_reference.add(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> None:
for k in self.dq_store:
print(__a )
def __repr__( self : Dict ) -> str:
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
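# With capacity 4, referring to 5 evicts the least recently used key, 2, leaving [5, 4, 'A', 3].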
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 310
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super().__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
| 310
| 1
|
"""simple docstring"""
from math import sqrt
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = 0
for i in range(1 ,int(sqrt(lowercase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(lowercase_ ):
total += i + n // i
elif i == sqrt(lowercase_ ):
total += i
return total - n
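# Example: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so the solver below counts
# both members of such amicable pairs; its "!= i" check excludes perfect numbers like 6 and 28.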
def lowercase__ ( lowercase_ = 10_000 ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = sum(
i
for i in range(1 ,lowercase_ )
if sum_of_divisors(sum_of_divisors(lowercase_ ) ) == i and sum_of_divisors(lowercase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 310
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
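# Example for the helper above: [2, 2, 3, 3, 4] -> [2, 3]; both values occur twice, and tied
# modes are returned in sorted order.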
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "swinv2"
SCREAMING_SNAKE_CASE__ :Optional[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , __a : str=224 , __a : Dict=4 , __a : Any=3 , __a : Optional[int]=96 , __a : Optional[Any]=[2, 2, 6, 2] , __a : Dict=[3, 6, 12, 24] , __a : Optional[Any]=7 , __a : Optional[Any]=4.0 , __a : Dict=True , __a : Optional[int]=0.0 , __a : Dict=0.0 , __a : List[Any]=0.1 , __a : Tuple="gelu" , __a : Optional[int]=False , __a : Union[str, Any]=0.02 , __a : Tuple=1e-5 , __a : str=32 , **__a : Optional[Any] , ) -> Any:
super().__init__(**__a )
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[int] = embed_dim
_UpperCamelCase : Tuple = depths
_UpperCamelCase : int = len(__a )
_UpperCamelCase : List[Any] = num_heads
_UpperCamelCase : Union[str, Any] = window_size
_UpperCamelCase : List[Any] = mlp_ratio
_UpperCamelCase : Optional[int] = qkv_bias
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : List[str] = drop_path_rate
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Tuple = use_absolute_embeddings
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : str = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase : str = int(embed_dim * 2 ** (len(__a ) - 1) )
_UpperCamelCase : Any = (0, 0, 0, 0)
| 310
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
| 310
| 1
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : Tuple ) -> Tuple:
super().__init__(*__a , **__a )
_UpperCamelCase : int = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , *__a : Optional[Any] , **__a : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : str = super().add_tokens(__a , *__a , **__a )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
" `placeholder_token` that is not already in the tokenizer." )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int , *__a : Optional[int] , __a : List[str]=1 , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Union[str, Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__a , *__a , **__a )
output.append(__a )
else:
_UpperCamelCase : Optional[Any] = []
for i in range(__a ):
_UpperCamelCase : List[str] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__a , *__a , **__a )
output.append(__a )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}. Keep placeholder tokens independent.''' )
_UpperCamelCase : str = output
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] , __a : Tuple=False , __a : List[Any]=1.0 ) -> Union[str, Any]:
if isinstance(__a , __a ):
_UpperCamelCase : Tuple = []
for i in range(len(__a ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__a ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_UpperCamelCase : Optional[Any] = self.token_map[placeholder_token]
_UpperCamelCase : Union[str, Any] = tokens[: 1 + int(len(__a ) * prop_tokens_to_load )]
if vector_shuffle:
_UpperCamelCase : int = copy.copy(__a )
random.shuffle(__a )
_UpperCamelCase : List[Any] = text.replace(__a , " ".join(__a ) )
return text
def __call__( self : int , __a : Union[str, Any] , *__a : List[Any] , __a : Optional[Any]=False , __a : Union[str, Any]=1.0 , **__a : Tuple ) -> List[Any]:
return super().__call__(
self.replace_placeholder_tokens_in_text(
__a , vector_shuffle=__a , prop_tokens_to_load=__a ) , *__a , **__a , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : str , *__a : List[str] , __a : List[Any]=False , __a : str=1.0 , **__a : List[Any] ) -> str:
return super().encode(
self.replace_placeholder_tokens_in_text(
__a , vector_shuffle=__a , prop_tokens_to_load=__a ) , *__a , **__a , )
| 310
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "biogpt"
def __init__( self : Optional[int] , __a : List[Any]=4_2384 , __a : str=1024 , __a : Optional[int]=24 , __a : Optional[int]=16 , __a : List[str]=4096 , __a : Union[str, Any]="gelu" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=1024 , __a : List[Any]=0.02 , __a : Dict=1e-1_2 , __a : Optional[Any]=True , __a : str=True , __a : int=0.0 , __a : Optional[int]=0.0 , __a : Union[str, Any]=1 , __a : Optional[int]=0 , __a : int=2 , **__a : int , ) -> Any:
_UpperCamelCase : Tuple = vocab_size
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : Dict = layer_norm_eps
_UpperCamelCase : List[Any] = scale_embedding
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : str = layerdrop
_UpperCamelCase : Dict = activation_dropout
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
| 310
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(lowercase_ ,lowercase_ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(lowercase_ )]
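# Example: length=5 yields [0, 1, 6, 15, 28], i.e. n * (2 * n - 1) for n = 0..4.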
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 310
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(line ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
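# The mapping built above keys special tokens ("[MASK]", "[PAD]", "[UNK]") by name alone and every
# other entity as "<language>:<entity_name>" (e.g. "en:Japan"), matching the entity vocabulary
# format that MLukeTokenizer expects.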
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , __a : Any , __a : Optional[Any]=13 , __a : List[Any]=7 , __a : List[str]=True , __a : Tuple=True , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : int=99 , __a : int=32 , __a : Dict=5 , __a : List[Any]=4 , __a : Tuple=37 , __a : List[str]="gelu" , __a : int=0.1 , __a : Optional[int]=0.1 , __a : str=512 , __a : Dict=16 , __a : Optional[int]=2 , __a : str=0.02 , __a : Any=4 , ) -> List[Any]:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : int = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : Dict = is_training
_UpperCamelCase : Optional[Any] = use_attention_mask
_UpperCamelCase : Optional[int] = use_token_type_ids
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : List[Any] = type_vocab_size
_UpperCamelCase : Optional[Any] = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Tuple = num_choices
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : int = None
if self.use_attention_mask:
_UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase : Tuple = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : List[str] = True
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Any = FlaxRobertaModelTester(self )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
for model_class_name in self.all_model_classes:
_UpperCamelCase : Dict = model_class_name.from_pretrained("roberta-base" , from_pt=__a )
_UpperCamelCase : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
| 310
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
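# A minimal usage sketch of the metric above (illustrative inputs; assumes the
# `datasets` and `nltk` packages are installed and this module is loadable as
# the "google_bleu" metric):
#
#   import datasets
#   hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
#   reference = ["the", "cat", "is", "on", "the", "mat"]
#   google_bleu = datasets.load_metric("google_bleu")
#   result = google_bleu.compute(predictions=[hypothesis], references=[[reference]])
#   print(result["google_bleu"])  # a float between 0 (no n-gram overlap) and 1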
| 310
| 1
|
"""simple docstring"""
lowerCamelCase__ = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
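# A hedged sketch of how a pin table like the one above is typically consumed
# when assembling packaging requirements. `deps_list` is a hypothetical helper
# (not part of the original file); it simply looks up pinned specifier strings.
def deps_list(deps: dict, *pkgs: str) -> list:
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list({"torch": "torch>=1.4"}, "torch") -> ["torch>=1.4"]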
| 310
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if number > 0:
raise ValueError("input must be a negative integer" )
_UpperCamelCase : Union[str, Any] = len(bin(lowercase_ )[3:] )
_UpperCamelCase : Optional[Any] = bin(abs(lowercase_ ) - (1 << binary_number_length) )[3:]
_UpperCamelCase : int = (
(
"1"
+ "0" * (binary_number_length - len(lowercase_ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
lowerCamelCase__ = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
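# A hedged usage sketch of the helpers above (the un-obfuscated upstream names
# are extract_path_from_uri / is_remote_filesystem / rename / _reset_fsspec_lock):
#
#   import fsspec
#   fs, _, _ = fsspec.get_fs_token_paths("s3://bucket/dataset")
#   is_remote_filesystem(fs)  # -> True, since fs.protocol != "file"
#   # upstream, extract_path_from_uri("s3://bucket/dataset") -> "bucket/dataset"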
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 10**-10 ) -> float:
"""simple docstring"""
_UpperCamelCase : Dict = a
while True:
_UpperCamelCase : Optional[Any] = Decimal(lowercase_ ) - (
Decimal(eval(lowercase_ ) ) / Decimal(eval(str(diff(lowercase_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowercase_ ) ) < precision: # noqa: S307
return float(lowercase_ )
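# A minimal alternative sketch that avoids eval() by compiling the expression
# once with sympy (`newton_raphson_sympy` is a hypothetical helper, not part of
# the original file; it assumes the expression uses "x" as its variable):
from sympy import lambdify, symbols, sympify


def newton_raphson_sympy(expr: str, x0: float, precision: float = 10**-10) -> float:
    x_sym = symbols("x")
    f = lambdify(x_sym, sympify(expr))  # compiled f(x)
    f_prime = lambdify(x_sym, sympify(expr).diff(x_sym))  # compiled f'(x)
    x = x0
    for _ in range(100):  # iteration cap guards against non-convergence
        if abs(f(x)) < precision:
            return float(x)
        x -= f(x) / f_prime(x)  # Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n)
    raise ArithmeticError("Newton-Raphson did not converge")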
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # (the root of sin(x) = 0 near 2 is pi)
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find root of log(x) - 1 = 0 (i.e. the value of e)
    print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
    # Exponential root: exp(x) - 1 = 0 at x = 0
    print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 1
|
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = DownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "down"
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : int = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = ResnetDownsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[Any] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = AttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : str = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = CrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Dict = "down"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
_UpperCamelCase, _UpperCamelCase : str = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : str = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
_UpperCamelCase : Optional[int] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = SimpleCrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Tuple = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_encoder_hidden_states=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Dict = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : List[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
_UpperCamelCase : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = SkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[Any] = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
_UpperCamelCase : Optional[int] = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = AttnSkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[int] = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = DownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = {
"in_channels": 32,
"out_channels": 32,
}
_UpperCamelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
_UpperCamelCase : Any = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = AttnDownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = {
"in_channels": 32,
"out_channels": 32,
}
_UpperCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : List[Any] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = UNetMidBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Dict = "mid"
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
_UpperCamelCase : List[Any] = {
"in_channels": 32,
"temb_channels": 128,
}
_UpperCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : int = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = UNetMidBlockaDCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[int] = "mid"
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : Union[str, Any] = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : int = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = UNetMidBlockaDSimpleCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "mid"
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
return super().get_dummy_input(include_encoder_hidden_states=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
_UpperCamelCase, _UpperCamelCase : Tuple = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : List[Any] = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = UpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :List[str] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
_UpperCamelCase : Any = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = ResnetUpsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[Any] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = CrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[Any] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : Dict = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : List[Any] = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : str = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = SimpleCrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :int = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__a , include_encoder_hidden_states=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : int = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : int = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = AttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[int] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
_UpperCamelCase : Dict = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = SkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :List[str] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : int = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = AttnSkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Tuple = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = UpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :int = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : str = {"in_channels": 32, "out_channels": 32}
_UpperCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
_UpperCamelCase : List[str] = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = AttnUpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :List[str] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : int = {"in_channels": 32, "out_channels": 32}
_UpperCamelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : str = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(__a )
| 310
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = "informer"
SCREAMING_SNAKE_CASE__ :Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , __a : Optional[int] = None , __a : Optional[int] = None , __a : str = "student_t" , __a : str = "nll" , __a : int = 1 , __a : List[int] = None , __a : Optional[Union[str, bool]] = "mean" , __a : int = 0 , __a : int = 0 , __a : int = 0 , __a : int = 0 , __a : Optional[List[int]] = None , __a : Optional[List[int]] = None , __a : int = 64 , __a : int = 32 , __a : int = 32 , __a : int = 2 , __a : int = 2 , __a : int = 2 , __a : int = 2 , __a : bool = True , __a : str = "gelu" , __a : float = 0.05 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : int = 100 , __a : float = 0.02 , __a : int=True , __a : str = "prob" , __a : int = 5 , __a : bool = True , **__a : Union[str, Any] , ) -> Optional[Any]:
# time series specific configuration
_UpperCamelCase : Tuple = prediction_length
_UpperCamelCase : Tuple = context_length or prediction_length
_UpperCamelCase : int = distribution_output
_UpperCamelCase : int = loss
_UpperCamelCase : Optional[Any] = input_size
_UpperCamelCase : Tuple = num_time_features
_UpperCamelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_UpperCamelCase : Dict = scaling
_UpperCamelCase : List[Any] = num_dynamic_real_features
_UpperCamelCase : Tuple = num_static_real_features
_UpperCamelCase : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCamelCase : Dict = cardinality
else:
_UpperCamelCase : int = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCamelCase : Dict = embedding_dimension
else:
_UpperCamelCase : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCamelCase : str = num_parallel_samples
# Transformer architecture configuration
_UpperCamelCase : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
_UpperCamelCase : Dict = d_model
_UpperCamelCase : Any = encoder_attention_heads
_UpperCamelCase : List[str] = decoder_attention_heads
_UpperCamelCase : List[str] = encoder_ffn_dim
_UpperCamelCase : str = decoder_ffn_dim
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : int = decoder_layers
_UpperCamelCase : Any = dropout
_UpperCamelCase : Tuple = attention_dropout
_UpperCamelCase : Dict = activation_dropout
_UpperCamelCase : List[Any] = encoder_layerdrop
_UpperCamelCase : List[str] = decoder_layerdrop
_UpperCamelCase : Optional[Any] = activation_function
_UpperCamelCase : Optional[Any] = init_std
_UpperCamelCase : Optional[int] = use_cache
# Informer
_UpperCamelCase : Union[str, Any] = attention_type
_UpperCamelCase : int = sampling_factor
_UpperCamelCase : List[str] = distil
super().__init__(is_encoder_decoder=__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
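# Worked example (illustrative): with input_size=1, lags_sequence of length 7,
# embedding_dimension=[2], num_dynamic_real_features=0, num_time_features=2 and
# num_static_real_features=0, the property above yields 2 + 0 + 2 + 0 + 1 * 2 = 6,
# so feature_size = input_size * len(lags_sequence) + 6 = 1 * 7 + 6 = 13.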
| 310
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
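# Worked example (illustrative): for the default task the property above yields
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# which ONNX export consumes as the dynamic-axes specification per input tensor.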
| 310
| 1
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : Optional[Any] ) -> int:
        # split the comma-separated input string into a list of values
        _UpperCamelCase : Optional[int] = arr.split("," )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : List[Any] = [int(self.array[0] )] * len(self.array )
_UpperCamelCase : Tuple = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
_UpperCamelCase : List[Any] = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
_UpperCamelCase : Dict = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
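# A compact single-pass version of the same maximum-subarray (Kadane) recurrence,
# offered as an illustrative cross-check; `kadane` is not part of the original class.
def kadane(nums: list) -> int:
    best = current = nums[0]
    for value in nums[1:]:
        current = max(value, current + value)  # extend the run or start anew
        best = max(best, current)
    return best


# e.g. kadane([1, -2, 3, 4, -1]) == 7, matching solve_sub_array on "1,-2,3,4,-1"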
if __name__ == "__main__":
lowerCamelCase__ = input("please input some numbers:")
lowerCamelCase__ = SubArray(whole_array)
lowerCamelCase__ = array.solve_sub_array()
print(("the results is:", re))
| 310
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
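# A hedged usage sketch of the pipeline exercised above (requires torch, timm
# and network access; the outputs shown are schematic, not asserted values):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]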
| 310
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :UNetaDModel
SCREAMING_SNAKE_CASE__ :ScoreSdeVeScheduler
def __init__( self : int , __a : UNetaDModel , __a : ScoreSdeVeScheduler ) -> Optional[Any]:
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self : List[Any] , __a : int = 1 , __a : int = 2000 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[str] = "pil" , __a : bool = True , **__a : Dict , ) -> Union[ImagePipelineOutput, Tuple]:
_UpperCamelCase : Optional[Any] = self.unet.config.sample_size
_UpperCamelCase : str = (batch_size, 3, img_size, img_size)
_UpperCamelCase : Dict = self.unet
_UpperCamelCase : Dict = randn_tensor(__a , generator=__a ) * self.scheduler.init_noise_sigma
_UpperCamelCase : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(__a )
self.scheduler.set_sigmas(__a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCamelCase : Any = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_UpperCamelCase : int = self.unet(__a , __a ).sample
_UpperCamelCase : Any = self.scheduler.step_correct(__a , __a , generator=__a ).prev_sample
# prediction step
_UpperCamelCase : List[str] = model(__a , __a ).sample
_UpperCamelCase : Any = self.scheduler.step_pred(__a , __a , __a , generator=__a )
_UpperCamelCase, _UpperCamelCase : Tuple = output.prev_sample, output.prev_sample_mean
_UpperCamelCase : str = sample_mean.clamp(0 , 1 )
_UpperCamelCase : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase : Dict = self.numpy_to_pil(__a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__a )
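# A hedged usage sketch of the pipeline above; "google/ncsnpp-celebahq-256" is,
# to the best of our knowledge, an upstream score-SDE-VE checkpoint (sampling
# with 2000 steps is slow on CPU):
#
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sample.png")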
| 310
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=__a ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase : Dict = analyze_text(lowercase_ )
_UpperCamelCase : int = list(" " + ascii_lowercase )
    # total count of single characters in the text.
    _UpperCamelCase : Tuple = sum(single_char_strings.values() )
    # accumulator for the single-character (first-order) entropy sum
    _UpperCamelCase : Tuple = 0
    # for each character of the alphabet, add its entropy term if it occurs
    for ch in my_alphas:
        if ch in single_char_strings:
            _UpperCamelCase : Tuple = single_char_strings[ch]
            _UpperCamelCase : int = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # Shannon entropy term: p * log2(p)
    # print first-order entropy
    print(F'''{round(-1 * my_fir_sum ):.1f}''' )
    # total count of two-character sequences in the text.
    _UpperCamelCase : List[str] = sum(two_char_strings.values() )
    _UpperCamelCase : List[Any] = 0
    # for each two-character sequence over the alphabet, add its entropy term.
    for cha in my_alphas:
        for chb in my_alphas:
            _UpperCamelCase : Optional[Any] = cha + chb
            if sequence in two_char_strings:
                _UpperCamelCase : Any = two_char_strings[sequence]
                _UpperCamelCase : Tuple = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second-order entropy
    print(F'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between the two entropies
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowercase__ ( lowercase_ ) -> tuple[dict, dict]:
"""simple docstring"""
_UpperCamelCase : List[str] = Counter() # type: ignore
_UpperCamelCase : str = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(lowercase_ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
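# Worked check of the Shannon entropy formula used above, H = -sum(p * log2(p)),
# on a toy text (illustrative): counts for "abab " are {'a': 2, 'b': 2, ' ': 1},
# total 5, so H = -(0.4*log2(0.4) + 0.4*log2(0.4) + 0.2*log2(0.2)) ≈ 1.52 bits.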
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 310
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : Any = _sin / (2 * q_factor)
_UpperCamelCase : str = (1 - _cos) / 2
_UpperCamelCase : Any = 1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : List[str] = -2 * _cos
_UpperCamelCase : Tuple = 1 - alpha
_UpperCamelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : List[str] = tau * frequency / samplerate
_UpperCamelCase : str = sin(lowercase_ )
_UpperCamelCase : Optional[Any] = cos(lowercase_ )
_UpperCamelCase : Dict = _sin / (2 * q_factor)
_UpperCamelCase : List[Any] = (1 + _cos) / 2
_UpperCamelCase : Optional[int] = -1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : str = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Tuple = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Dict = _sin / 2
_UpperCamelCase : int = 0
_UpperCamelCase : str = -ba
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : Optional[int] = -2 * _cos
_UpperCamelCase : Optional[Any] = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : str = tau * frequency / samplerate
_UpperCamelCase : Optional[Any] = sin(lowercase_ )
_UpperCamelCase : Optional[int] = cos(lowercase_ )
_UpperCamelCase : int = _sin / (2 * q_factor)
_UpperCamelCase : List[str] = 1 - alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : Union[str, Any] = 1 + alpha
_UpperCamelCase : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : int = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : List[Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Optional[int] = 10 ** (gain_db / 40)
_UpperCamelCase : str = 1 + alpha * big_a
_UpperCamelCase : Union[str, Any] = -2 * _cos
_UpperCamelCase : Optional[int] = 1 - alpha * big_a
_UpperCamelCase : int = 1 + alpha / big_a
_UpperCamelCase : Optional[Any] = -2 * _cos
_UpperCamelCase : Any = 1 - alpha / big_a
_UpperCamelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
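

# A minimal usage sketch (assumption: `IIRFilter` follows TheAlgorithms'
# `audio_filters.iir_filter` interface, where `process(sample)` filters one
# sample at a time; the variable names below are illustrative only):
#
#     flt = make_lowpass(1_000, 48_000)
#     filtered = [flt.process(sample) for sample in samples]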
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """
    Mobius function of n: 1 if n is square-free with an even number of prime
    factors, -1 if square-free with an odd number, and 0 otherwise.
    """
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
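

# For example, mobius(10) == 1 (10 = 2 * 5 is square-free with an even number
# of prime factors), mobius(30) == -1 (2 * 3 * 5, odd count), and
# mobius(12) == 0 (12 = 2^2 * 3 is divisible by a square).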
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
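

# The 0-1000 range is the coordinate convention expected by LayoutLM-style
# models: boxes are expressed relative to the page size, so they stay
# comparable across images of different resolutions.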


def apply_tesseract(image, lang=None, tesseract_config=None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = ["pixel_values"]
def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Optional[str] = None , __a : Optional[str] = "" , **__a : str , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Any = size if size is not None else {"height": 224, "width": 224}
_UpperCamelCase : str = get_size_dict(__a )
_UpperCamelCase : Optional[int] = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : List[Any] = resample
_UpperCamelCase : str = apply_ocr
_UpperCamelCase : Any = ocr_lang
_UpperCamelCase : List[Any] = tesseract_config
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
_UpperCamelCase : Dict = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_UpperCamelCase : List[str] = (size["height"], size["width"])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Dict , ) -> PIL.Image.Image:
_UpperCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : List[str] = size if size is not None else self.size
_UpperCamelCase : Optional[int] = get_size_dict(__a )
_UpperCamelCase : Union[str, Any] = resample if resample is not None else self.resample
_UpperCamelCase : int = apply_ocr if apply_ocr is not None else self.apply_ocr
_UpperCamelCase : List[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
_UpperCamelCase : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_UpperCamelCase : Union[str, Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Any = [to_numpy_array(__a ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
_UpperCamelCase : Dict = []
_UpperCamelCase : int = []
for image in images:
_UpperCamelCase, _UpperCamelCase : List[str] = apply_tesseract(__a , __a , __a )
words_batch.append(__a )
boxes_batch.append(__a )
if do_resize:
_UpperCamelCase : Tuple = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_UpperCamelCase : int = [flip_channel_order(__a ) for image in images]
_UpperCamelCase : str = [to_channel_dimension_format(__a , __a ) for image in images]
_UpperCamelCase : Union[str, Any] = BatchFeature(data={"pixel_values": images} , tensor_type=__a )
if apply_ocr:
_UpperCamelCase : str = words_batch
_UpperCamelCase : Optional[Any] = boxes_batch
return data
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
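

# A minimal sketch of a concrete command built on this interface (the command
# name and behaviour are hypothetical, not part of this module):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")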
"""simple docstring"""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Returns the number of different ways to make `pence` pence using any number of coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
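

# Iterating coins in the outer loop counts each multiset of coins exactly once
# (combinations, not permutations). For example, solution(5) == 4:
# {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}.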
if __name__ == "__main__":
    assert solution(200) == 73682
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Returns True if there is an augmenting path from s to t, filling `parent` along the way."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Returns the list of edges crossing the minimum cut between source and sink."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
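

# mincut() is the Edmonds-Karp variant of Ford-Fulkerson: BFS keeps finding
# augmenting paths until none remain, after which the edges that were fully
# saturated by the flow (original capacity > 0, residual capacity 0) are
# collected; the minimum cut is read off from these saturated edges.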
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Pure Python implementation of the merge sort algorithm."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
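

# For example, merge_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]: the list is
# halved until single elements remain, and merge() stitches two already-sorted
# halves back together in linear time.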
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : str = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : int = crop_size
_UpperCamelCase : Optional[Any] = resample
_UpperCamelCase : Dict = do_rescale
_UpperCamelCase : Any = rescale_factor
_UpperCamelCase : Any = offset
_UpperCamelCase : Union[str, Any] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
"""simple docstring"""
import argparse
import json
import os

import torch

from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
assert isinstance(lowercase_ ,torch.Tensor ), type(lowercase_ )  # torch.Tensor is the type; torch.tensor is a factory function
_UpperCamelCase : Optional[Any] = v
return r
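# Illustrative usage (a sketch; "load_checkpoint" is an assumed name -- the def
# above is obfuscated to `lowercase__`). The helper returns an OrderedDict of
# torch.Tensor parameters pulled out of a pickled {"model": ...} checkpoint:
#   state_dict = load_checkpoint("frcnn.pkl")
#   frcnn.load_state_dict(state_dict)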
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device )
_UpperCamelCase : str = in_tensor.numpy()
_UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = urlparse(lowercase_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str:
"""simple docstring"""
_UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCamelCase : List[str] = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
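# URL layout sketch (hypothetical model ids). A bare model id takes the legacy
# flat layout, a namespaced id the nested one:
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin", use_cdn=False)
#   -> "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin"
#   hf_bucket_url("facebook/bart-large", "pytorch_model.bin", use_cdn=False)
#   -> "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/pytorch_model.bin"
# ("hf_bucket_url" is the original name; the def above is bound to `lowercase__`.)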
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ ,lowercase_ ):
ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ ,lowercase_ ):
ua += "; " + user_agent
_UpperCamelCase : Any = {"user-agent": ua}
if resume_size > 0:
_UpperCamelCase : str = "bytes=%d-" % (resume_size,)
_UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
_UpperCamelCase : List[str] = response.headers.get("Content-Length" )
_UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None
_UpperCamelCase : Optional[int] = tqdm(
unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,)
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
def lowercase__ ( lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = url.encode("utf-8" )
_UpperCamelCase : List[str] = shaaaa(lowercase_ )
_UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
_UpperCamelCase : Optional[Any] = etag.encode("utf-8" )
_UpperCamelCase : Optional[Any] = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
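# Cache filename scheme sketch: sha256(url) [+ "." + sha256(etag)] [+ ".h5"]:
#   url_to_filename("https://example.com/model.bin", etag='"abc"')
#   -> "<sha256 hex of url>.<sha256 hex of etag>"
# so two ETags of the same URL land in distinct cache entries. (Illustrative;
# the def above is obfuscated to `lowercase__`, and `shaaaa` stands in for
# hashlib.sha256 after digit obfuscation.)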
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
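# Resolution sketch for the cached-path helper above (call shapes only):
#   cached_path("https://.../pytorch_model.bin")  -> path of the cached download
#   cached_path("/local/weights.bin")             -> the same local path, unchanged
#   cached_path("archive.zip", extract_compressed_file=True)
#       -> ".../archive-zip-extracted" directory
# ("cached_path" is the original name; here it is obfuscated to `lowercase__`.)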
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
_UpperCamelCase : Optional[int] = req.json()  # parse the response object, not the requests module
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
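# Conversion note (sketch): for every Detectron-style "running_var" entry the
# loop above also emits a zeroed PyTorch "num_batches_tracked" tensor, e.g.
#   "backbone.bn1.running_var"          -> kept, converted via torch.from_numpy
#   "backbone.bn1.num_batches_tracked"  -> added as torch.tensor([0])
# (the key names are hypothetical examples, not taken from a real checkpoint).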
def lowercase__ ( ) -> Dict:
"""simple docstring"""
print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
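# Batching sketch -- consecutive slices of size `batch`, last one possibly short:
#   list(chunk([1, 2, 3, 4, 5], batch=2))  ->  [[1, 2], [3, 4], [5]]
# ("chunk" is an assumed name for the generator above, which is bound to
# `lowercase__` after obfuscation.)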
| 310
| 1
|
"""simple docstring"""
from functools import reduce
lowerCamelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowercase__ ( lowercase_ = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda x ,y : str(int(x ) * int(y ) ) ,n[i : i + 13] ) )  # duplicate lambda parameter names are a SyntaxError; restore distinct names
for i in range(len(lowercase_ ) - 12 ) )
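# How the reduce works on a toy 4-digit window (illustrative only; the real
# solution slides a 13-digit window over the 1000-digit constant N above):
#   reduce(lambda x, y: str(int(x) * int(y)), "9989")  ->  "5832"   # 9*9*8*9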
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__a , self ).__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
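# Few-shot NER usage sketch (hypothetical tensors; W_supports must carry
# "sizes", "start_token_id" and "end_token_id" next to the usual BERT inputs):
#   model = FSNERModel()                      # original class name, pre-obfuscation
#   p_starts, p_ends = model(W_query, W_supports)
# Each row of p_starts / p_ends is a softmax over the query's token positions,
# scored against the support set's start / end token embeddings.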
| 310
| 1
|
"""simple docstring"""
import math
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
return math.sqrt(lowercase_ ) * math.sqrt(lowercase_ ) == num
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Dict = n
while left <= right:
_UpperCamelCase : Tuple = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_UpperCamelCase : Any = mid - 1
else:
_UpperCamelCase : Tuple = mid + 1
return False
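# Behaviour sketch for the integer binary search above (doctest-style):
#   16 -> True    # mid reaches 4 and 4**2 == 16
#   26 -> False   # search narrows past 5 (25 < 26) and terminates
# Unlike the float version above it, this variant cannot be thrown off by
# math.sqrt rounding on very large inputs.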
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
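# Expected behaviour of the mode helper above (doctest-style sketch):
#   [2, 2, 3]     -> [2]
#   [1, 1, 2, 2]  -> [1, 2]   # all tied modes are returned, sorted
#   []            -> []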
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Any = BertConfig.from_json_file(lowercase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCamelCase : Any = BertForPreTraining(lowercase_ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowercase_ ,lowercase_ ,lowercase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 310
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
| 310
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=__a , )
assert hasattr(self , "env" )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[str] ) -> Any:
_UpperCamelCase : Tuple = F'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
# distributed data settings
_UpperCamelCase : List[str] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__a , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version="py36" , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Dict:
TrainingJobAnalytics(__a ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str ) -> Union[str, Any]:
# create estimator
_UpperCamelCase : Optional[Any] = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
_UpperCamelCase : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_UpperCamelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __a )
| 310
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowercase_ ) )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
if index == len(lowercase_ ):
return True
# Recursive Step
for i in range(lowercase_ ):
if valid_coloring(graph[index] ,lowercase_ ,lowercase_ ):
# Color current vertex
_UpperCamelCase : Tuple = i
# Validate coloring
if util_color(lowercase_ ,lowercase_ ,lowercase_ ,index + 1 ):
return True
# Backtrack
_UpperCamelCase : Dict = -1
return False
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[int]:
"""simple docstring"""
_UpperCamelCase : Dict = [-1] * len(lowercase_ )
if util_color(lowercase_ ,lowercase_ ,lowercase_ ,0 ):
return colored_vertices
return []
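# Backtracking demo (sketch; "color" is the original entry-point name, here the
# last `lowercase__` def). K3, the triangle graph, as an adjacency matrix:
#   triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   color(triangle, 3)  ->  [0, 1, 2]   # a proper 3-coloring
#   color(triangle, 2)  ->  []          # K3 has no 2-coloring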
| 310
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 310
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "vit_msn"
def __init__( self : List[Any] , __a : Union[str, Any]=768 , __a : Union[str, Any]=12 , __a : Any=12 , __a : int=3072 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=0.0 , __a : int=0.0 , __a : Optional[Any]=0.02 , __a : Dict=1e-0_6 , __a : List[Any]=224 , __a : Optional[int]=16 , __a : int=3 , __a : str=True , **__a : Optional[Any] , ) -> Optional[Any]:
super().__init__(**__a )
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : Any = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = image_size
_UpperCamelCase : str = patch_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Any = qkv_bias
| 310
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
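# Illustrative usage sketch (added for clarity; not part of the original
# conversion script, and it assumes the tokenizer was also saved to the same
# dump folder): reload the converted checkpoint and repeat the masked-word
# check performed above.
#
#     tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
#     model = LukeForMaskedLM.from_pretrained(pytorch_dump_folder_path).eval()
#     encoding = tokenizer("Tokyo is the capital of <mask>.", entity_spans=[(24, 30)], return_tensors="pt")
#     outputs = model(**encoding)
#     mask_position = encoding["input_ids"][0].tolist().index(tokenizer.convert_tokens_to_ids("<mask>"))
#     print(tokenizer.decode(outputs.logits[0][mask_position].argmax(dim=-1)))  # expected: "Japan"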
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
    _UpperCamelCase : Tuple = [json.loads(line ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list:
"""simple docstring"""
if len(lowercase_ ) <= 1:
return [tuple(lowercase_ )]
_UpperCamelCase : Tuple = []
def generate(lowercase_ ,lowercase_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 ,lowercase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_UpperCamelCase, _UpperCamelCase : str = arr[k - 1], arr[i]
else: # k is odd
_UpperCamelCase, _UpperCamelCase : Any = arr[k - 1], arr[0]
generate(k - 1 ,lowercase_ )
generate(len(lowercase_ ) ,lowercase_ )
return res
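# Cross-check sketch (illustrative; assumes the canonical implementation in
# which the two tuple assignments inside `generate` perform real in-place
# swaps): Heap's algorithm emits every permutation exactly once, so the result
# matches itertools.permutations up to ordering.
#
#     from itertools import permutations
#     assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
#     heaps([1, 2, 3])
#     # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]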
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 310
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
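# Illustrative direct call to the wrapped nltk function (the tokens below are
# made up; the expected value 0.5 was computed by hand as 9 matching n-grams
# out of a maximum of 18 across orders 1-4).
if __name__ == "__main__":
    hyp = ["the", "cat", "sat", "on", "the", "mat"]
    ref = ["the", "cat", "is", "on", "the", "mat"]
    print(round(gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp]), 2))  # 0.5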
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
_UpperCamelCase : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_UpperCamelCase : set[int] = set()
return any(
node not in visited and depth_first_search(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
for node in graph )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
visited.add(lowercase_ )
rec_stk.add(lowercase_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(lowercase_ )
return False
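# Usage sketch (illustrative; assumes the first function above keeps its
# original name `check_cycle`). The back edge 2 -> 0 makes the first graph
# cyclic, while the second graph is acyclic:
#
#     check_cycle({0: [1], 1: [2], 2: [0]})  # True
#     check_cycle({0: [1], 1: [2], 2: []})   # False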
if __name__ == "__main__":
from doctest import testmod
testmod()
| 310
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
from math import sqrt
def lowercase__ ( lowercase_ = 1_000_000 ) -> int:
"""simple docstring"""
_UpperCamelCase : int = 0
_UpperCamelCase : int = 0
_UpperCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowercase_ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
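# Reasoning sketch (Project Euler 86): for a cuboid with sides a <= b <= c, the
# shortest path over the surface between opposite corners has length
# sqrt((a + b)**2 + c**2), which is why the loop above only needs the largest
# side (`max_cuboid_size`) and the sum of the two shortest sides. The classic
# example is the 6 x 5 x 3 cuboid:
#
#     sqrt((5 + 3) ** 2 + 6 ** 2)  # 10.0 - an integer shortest path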
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
        # LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
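# Illustrative behaviour of the path helpers above (the original names
# `extract_path_from_uri` and `is_remote_filesystem` are assumed):
#
#     extract_path_from_uri("s3://my-bucket/my-dataset")  # "my-bucket/my-dataset"
#     extract_path_from_uri("/local/path/my-dataset")     # unchanged
#
# A filesystem whose `protocol` attribute is "file" counts as local, so the
# remote check returns False for it and True for e.g. an S3 filesystem.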
| 310
| 1
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ :Optional[Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE__ :int = TaTokenizer
SCREAMING_SNAKE_CASE__ :List[int] = []
def __init__( self : Any , __a : str=None , __a : Optional[int]=None , __a : Optional[Any]="</s>" , __a : List[Any]="<unk>" , __a : str="<pad>" , __a : Union[str, Any]=100 , __a : List[Any]=None , **__a : Any , ) -> List[str]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCamelCase : str = [F'''<extra_id_{i}>''' for i in range(__a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_UpperCamelCase : str = len(set(filter(lambda __a : bool("extra_id_" in str(__a ) ) , __a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
__a , tokenizer_file=__a , eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
_UpperCamelCase : List[str] = vocab_file
_UpperCamelCase : Optional[int] = False if not self.vocab_file else True
_UpperCamelCase : Optional[Any] = extra_ids
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : List[Any] , __a : int , __a : Optional[Any] ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_UpperCamelCase : Any = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , __a , )
return max_model_length
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase : Optional[int] = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_UpperCamelCase : Optional[int] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return list(
set(filter(lambda __a : bool(re.search(R"<extra_id_\d+>" , __a ) ) is not None , self.additional_special_tokens ) ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return [self.convert_tokens_to_ids(__a ) for token in self.get_sentinel_tokens()]
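# Illustrative sketch (assumes a standard checkpoint such as "t5-small"; the
# class above is T5TokenizerFast in the original source):
#
#     tok = T5TokenizerFast.from_pretrained("t5-small")
#     ids = tok("translate English to German: hello")["input_ids"]
#     ids[-1] == tok.eos_token_id     # True - </s> is appended automatically
#     len(tok.get_sentinel_tokens())  # 100 - the <extra_id_0> ... <extra_id_99> sentinels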
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCamelCase__ = logging.getLogger(__name__)
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return (preds == labels).mean()
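# Quick illustration (values are made up): the element-wise comparison followed
# by a mean yields the fraction of correct predictions.
#
#     simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))  # 0.75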
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ :str = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE__ :int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE__ :bool = field(
default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" ,lowercase_ )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase : str = processors[data_args.task_name]()
_UpperCamelCase : List[Any] = processor.get_labels()
_UpperCamelCase : str = len(lowercase_ )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowercase_ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_UpperCamelCase : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_UpperCamelCase : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=lowercase_ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_UpperCamelCase : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=lowercase_ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_UpperCamelCase : List[str] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=lowercase_ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(lowercase_ ) -> Dict:
_UpperCamelCase : Any = np.argmax(p.predictions ,axis=1 )
return {"acc": simple_accuracy(lowercase_ ,p.label_ids )}
# Data collator
_UpperCamelCase : int = DataCollatorWithPadding(lowercase_ ,pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase : List[str] = Trainer(
model=lowercase_ ,args=lowercase_ ,train_dataset=lowercase_ ,eval_dataset=lowercase_ ,compute_metrics=lowercase_ ,data_collator=lowercase_ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase : List[Any] = trainer.evaluate()
_UpperCamelCase : int = os.path.join(training_args.output_dir ,"eval_results.txt" )
if trainer.is_world_master():
with open(lowercase_ ,"w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" ,lowercase_ ,lowercase_ )
writer.write("%s = %s\n" % (key, value) )
results.update(lowercase_ )
return results
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 310
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This gives every gene a chance to appear during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : Optional[Any] = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[Any] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : List[Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Tuple = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : int = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase : Any = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : List[Any] = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : int = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : Any = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : List[Any] = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
        _UpperCamelCase : Tuple = sorted(lowercase_ ,key=lambda lowercase_ : lowercase_[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
_UpperCamelCase : Tuple = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : Optional[int] = [
(item, score / len(lowercase_ )) for item, score in population_score
]
        # This is the selection step.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 310
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
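# Illustrative instantiation (the original class names XLMRobertaXLConfig and
# XLMRobertaXLOnnxConfig are assumed):
#
#     config = XLMRobertaXLConfig(num_hidden_layers=36, hidden_size=2560)
#     config.model_type                # "xlm-roberta-xl"
#     onnx_config = XLMRobertaXLOnnxConfig(config, task="multiple-choice")
#     onnx_config.inputs["input_ids"]  # {0: 'batch', 1: 'choice', 2: 'sequence'}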
| 310
| 1
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :CommonSchedulerState
# setable values
SCREAMING_SNAKE_CASE__ :jnp.ndarray
SCREAMING_SNAKE_CASE__ :jnp.ndarray
SCREAMING_SNAKE_CASE__ :Optional[int] = None
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Tuple , __a : CommonSchedulerState , __a : jnp.ndarray , __a : jnp.ndarray ) -> List[Any]:
return cls(common=__a , init_noise_sigma=__a , timesteps=__a )
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :DDPMSchedulerState
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE__ :jnp.dtype
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
return True
@register_to_config
def __init__( self : Union[str, Any] , __a : int = 1000 , __a : float = 0.00_01 , __a : float = 0.02 , __a : str = "linear" , __a : Optional[jnp.ndarray] = None , __a : str = "fixed_small" , __a : bool = True , __a : str = "epsilon" , __a : jnp.dtype = jnp.floataa , ) -> str:
_UpperCamelCase : List[str] = dtype
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
if common is None:
_UpperCamelCase : Optional[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_UpperCamelCase : Dict = jnp.array(1.0 , dtype=self.dtype )
_UpperCamelCase : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__a , init_noise_sigma=__a , timesteps=__a , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : DDPMSchedulerState , __a : jnp.ndarray , __a : Optional[int] = None ) -> jnp.ndarray:
return sample
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : DDPMSchedulerState , __a : int , __a : Tuple = () ) -> DDPMSchedulerState:
_UpperCamelCase : Tuple = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
_UpperCamelCase : Any = (jnp.arange(0 , __a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__a , timesteps=__a , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : DDPMSchedulerState , __a : List[str] , __a : Optional[Any]=None , __a : List[str]=None ) -> Optional[int]:
_UpperCamelCase : List[Any] = state.common.alphas_cumprod[t]
_UpperCamelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_UpperCamelCase : Dict = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_UpperCamelCase : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_UpperCamelCase : List[Any] = jnp.clip(__a , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_UpperCamelCase : Dict = jnp.log(jnp.clip(__a , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
_UpperCamelCase : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_UpperCamelCase : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_UpperCamelCase : str = variance
_UpperCamelCase : int = state.common.betas[t]
_UpperCamelCase : Dict = (predicted_variance + 1) / 2
_UpperCamelCase : Any = frac * max_log + (1 - frac) * min_log
return variance
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : DDPMSchedulerState , __a : jnp.ndarray , __a : int , __a : jnp.ndarray , __a : Optional[jax.random.KeyArray] = None , __a : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_UpperCamelCase : int = timestep
if key is None:
_UpperCamelCase : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_UpperCamelCase, _UpperCamelCase : List[str] = jnp.split(__a , sample.shape[1] , axis=1 )
else:
_UpperCamelCase : Dict = None
# 1. compute alphas, betas
_UpperCamelCase : Tuple = state.common.alphas_cumprod[t]
_UpperCamelCase : Dict = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_UpperCamelCase : str = 1 - alpha_prod_t
_UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_UpperCamelCase : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_UpperCamelCase : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
_UpperCamelCase : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_UpperCamelCase : Union[str, Any] = jnp.clip(__a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_UpperCamelCase : int = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCamelCase : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_UpperCamelCase : int = jax.random.split(__a , num=1 )
_UpperCamelCase : int = jax.random.normal(__a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__a , __a , predicted_variance=__a ) ** 0.5) * noise
_UpperCamelCase : List[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_UpperCamelCase : Dict = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__a , state=__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : DDPMSchedulerState , __a : jnp.ndarray , __a : jnp.ndarray , __a : jnp.ndarray , ) -> jnp.ndarray:
return add_noise_common(state.common , __a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : DDPMSchedulerState , __a : jnp.ndarray , __a : jnp.ndarray , __a : jnp.ndarray , ) -> jnp.ndarray:
return get_velocity_common(state.common , __a , __a , __a )
def __len__( self : Optional[Any] ) -> Any:
return self.config.num_train_timesteps
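# Numeric sketch (made-up scalars) of steps 4-5 above, i.e. formula (7) from
# https://arxiv.org/pdf/2006.11239.pdf: the posterior mean is a weighted sum of
# the predicted x_0 and the current sample x_t.
#
#     alpha_prod_t, alpha_prod_t_prev = 0.5, 0.8
#     alpha_t = alpha_prod_t / alpha_prod_t_prev                                # 0.625
#     beta_t = 1.0 - alpha_t                                                    # 0.375
#     coeff_x0 = alpha_prod_t_prev ** 0.5 * beta_t / (1 - alpha_prod_t)         # ~0.6708
#     coeff_xt = alpha_t ** 0.5 * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)  # ~0.3162
#     # sanity check: with a noiseless x_t = alpha_prod_t**0.5 * x_0 the mean
#     # collapses to alpha_prod_t_prev**0.5 * x_0 (0.6708 + 0.3162 * 0.5**0.5 ~ 0.8944)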
| 310
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 310
| 1
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def _reset_fsspec_lock( ) -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn ,"reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
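# Quick illustrative checks for the helpers above (runnable locally, no remote
# filesystem needed): protocol prefixes are stripped and plain paths pass through.
assert extract_path_from_uri("s3://my-bucket/train.parquet") == "my-bucket/train.parquet"
assert extract_path_from_uri("relative/train.parquet") == "relative/train.parquet"
assert not is_remote_filesystem(None)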
| 310
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def extract_user_profile( script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
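# Note on the slice above: Instagram embeds the profile payload in a script tag as
# `window._sharedData = {"config": ...};`, so `data.find("{\"config\"")` locates the
# start of the JSON object and the `[... : -1]` slice drops the trailing ";" before
# json.loads parses it.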
class InstagramUser:
    '''simple docstring'''
    def __init__( self : Dict , __a : str ) -> Tuple:
        self.url = F'''https://www.instagram.com/{__a}/'''
        self.user_data = self.get_json()
    def get_json( self : Tuple ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ) -> str:
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 1
|
"""simple docstring"""
import math
import sys
def read_file_binary( file_path ) -> str:
    """simple docstring"""
    result = ""
    try:
        with open(file_path ,"rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data( data_bits ) -> str:
    """simple docstring"""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary( file_path ,to_write ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path ,"wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 ,len(to_write ) ,byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem ,2 ).to_bytes(1 ,byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
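# A standalone sketch of the padding rule write_file_binary applies above: the bit
# string is chopped into 8-bit chunks and the final (possibly partial) chunk is
# terminated with a "1" marker plus zero padding. The helper name is hypothetical.
def pad_to_bytes(bits, byte_length=8):
    chunks = [bits[i : i + byte_length] for i in range(0, len(bits), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append("10000000")  # byte-aligned input gets a whole marker byte
    else:
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    return chunks
# pad_to_bytes("0101010111") -> ['01010101', '11100000']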
def remove_prefix( data_bits ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path ,destination_path ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path ,decompressed )
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 310
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
def make_highpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
def make_bandpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_allpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] ,[b0, b1, b2] )
    return filt
def make_peak( frequency ,samplerate ,gain_db ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_lowshelf( frequency ,samplerate ,gain_db ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_highshelf( frequency ,samplerate ,gain_db ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
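# Hedged usage sketch for the biquad designers above. It assumes the companion
# IIRFilter class exposes a per-sample `process` method, as in the iir_filter module
# this file imports from; the sample values below are arbitrary.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    filtered = [lowpass.process(sample) for sample in (0.0, 1.0, -1.0, 0.5)]
    print(filtered)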
| 310
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = FunnelTokenizer
SCREAMING_SNAKE_CASE__ :Dict = FunnelTokenizerFast
SCREAMING_SNAKE_CASE__ :List[str] = True
SCREAMING_SNAKE_CASE__ :Dict = True
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
super().setUp()
_UpperCamelCase : Dict = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Union[str, Any] ) -> List[str]:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : List[str] ) -> str:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : Any = "UNwant\u00E9d,running"
_UpperCamelCase : Optional[int] = "unwanted, running"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
    tokenizer = self.tokenizer_class(self.vocab_file )
    tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
    self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
    tokenizers = self.get_tokenizers(do_lower_case=False )
    for tokenizer in tokenizers:
        inputs = tokenizer("UNwant\u00E9d,running" )
        sentence_len = len(inputs["input_ids"] ) - 1
        self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
        inputs = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
        self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
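# Hedged sketch of the WordPiece behavior the tests above assert: greedy
# longest-match-first splitting against the toy vocab maps "unwanted" to
# ["un", "##want", "##ed"]. The helper below is illustrative, not the real tokenizer.
def _toy_wordpiece(word, vocab):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:
            return ["<unk>"]  # no vocab entry matches at this position
        start = end
    return tokens
# _toy_wordpiece("unwanted", {"un", "##want", "##ed"}) -> ['un', '##want', '##ed']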
| 310
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
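# Hypothetical invocation of the conversion script above (all paths are placeholders):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --is_finetuned --dict_path ./dict.ltr.txt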
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = XGLMConfig
SCREAMING_SNAKE_CASE__ :Dict = {}
SCREAMING_SNAKE_CASE__ :Optional[Any] = "gelu"
def __init__( self : Union[str, Any] , __a : List[str] , __a : Optional[int]=14 , __a : str=7 , __a : List[str]=True , __a : List[Any]=True , __a : int=True , __a : int=99 , __a : Any=32 , __a : Any=2 , __a : Union[str, Any]=4 , __a : Union[str, Any]=37 , __a : Any="gelu" , __a : List[str]=0.1 , __a : str=0.1 , __a : Dict=512 , __a : Tuple=0.02 , ) -> Dict:
_UpperCamelCase : str = parent
_UpperCamelCase : int = batch_size
_UpperCamelCase : Optional[Any] = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : Dict = use_input_mask
_UpperCamelCase : Optional[Any] = use_labels
_UpperCamelCase : List[str] = vocab_size
_UpperCamelCase : Optional[Any] = d_model
_UpperCamelCase : int = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[int] = ffn_dim
_UpperCamelCase : List[Any] = activation_function
_UpperCamelCase : Union[str, Any] = activation_dropout
_UpperCamelCase : Tuple = attention_dropout
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : List[str] = None
_UpperCamelCase : Any = 0
_UpperCamelCase : Dict = 2
_UpperCamelCase : Any = 1
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : str = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_UpperCamelCase : List[str] = None
if self.use_input_mask:
_UpperCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Optional[int] = self.get_config()
_UpperCamelCase : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Any = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
), (
_UpperCamelCase
), (
_UpperCamelCase
), (
_UpperCamelCase
),
) : Optional[int] = config_and_inputs
_UpperCamelCase : Dict = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :Optional[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE__ :Optional[int] = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :List[str] = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : int = TFXGLMModelTester(self )
_UpperCamelCase : int = ConfigTester(self , config_class=__a , n_embd=37 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = TFXGLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
super().test_resize_token_embeddings()
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : str , __a : int=True ) -> Tuple:
_UpperCamelCase : List[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
_UpperCamelCase : Optional[Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_UpperCamelCase : List[Any] = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
_UpperCamelCase : str = model.generate(__a , do_sample=__a , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
_UpperCamelCase : Dict = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
_UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
_UpperCamelCase : Tuple = tokenizer("Today is a nice day and" , return_tensors="tf" )
_UpperCamelCase : List[str] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
_UpperCamelCase : Tuple = model.generate(__a , do_sample=__a , seed=[7, 0] )
_UpperCamelCase : str = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
_UpperCamelCase : Dict = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : List[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
_UpperCamelCase : Optional[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
_UpperCamelCase : Dict = "left"
# use different length sentences to test batching
_UpperCamelCase : Optional[Any] = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
_UpperCamelCase : Any = tokenizer(__a , return_tensors="tf" , padding=__a )
_UpperCamelCase : Any = inputs["input_ids"]
_UpperCamelCase : List[str] = model.generate(input_ids=__a , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
_UpperCamelCase : str = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_UpperCamelCase : Union[str, Any] = model.generate(input_ids=__a , max_new_tokens=12 )
_UpperCamelCase : List[str] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_UpperCamelCase : str = model.generate(input_ids=__a , max_new_tokens=12 )
_UpperCamelCase : Tuple = tokenizer.batch_decode(__a , skip_special_tokens=__a )
_UpperCamelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
_UpperCamelCase : Any = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
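# Note on the batched-generation test above: decoder-only models continue from the
# last input token, so batches must be left-padded. With right padding the shorter
# prompt would end in <pad> and generation would continue from padding:
#   right-padded: [tok tok tok PAD PAD] -> continues after PAD (wrong context)
#   left-padded:  [PAD PAD tok tok tok] -> continues after the real last token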
| 310
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
    factors = prime_factors(lowercase_ )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
return 0
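# Illustrative checks of the Möbius function above (standard values):
assert lowercase__(2 ) == -1  # a single prime factor
assert lowercase__(6 ) == 1  # 6 = 2 * 3, square-free with an even number of primes
assert lowercase__(4 ) == 0  # 4 = 2 * 2 is not square-free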
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : Dict = load_tool("text-to-speech" )
self.tool.setup()
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_UpperCamelCase : Dict = self.tool("hey" )
_UpperCamelCase : Union[str, Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_UpperCamelCase : int = self.tool("hey" )
_UpperCamelCase : Dict = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 310
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
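# Hedged walk-through of what the toy merge table above does to " lower": byte-level
# BPE first maps the leading space to "\u0120", then applies merges greedily:
#   ["\u0120", "l", "o", "w", "e", "r"] -> ["\u0120l", "o", "w", "e", "r"]
#   -> ["\u0120lo", "w", "e", "r"] -> ["\u0120low", "e", "r"] -> ["\u0120low", "er"]
# which matches the ["\u0120low", "er"] prefix asserted in the tokenization test above.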
| 310
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ :Dict = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Optional[Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase : int = text_generator("This is a test" , do_sample=__a )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_UpperCamelCase : List[Any] = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
__a , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_UpperCamelCase : int = text_generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(__a )},
{"generated_token_ids": ANY(__a )},
] , )
_UpperCamelCase : int = text_generator.model.config.eos_token_id
_UpperCamelCase : int = "<pad>"
_UpperCamelCase : List[str] = text_generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(__a )},
{"generated_token_ids": ANY(__a )},
],
[
{"generated_token_ids": ANY(__a )},
{"generated_token_ids": ANY(__a )},
],
] , )
@require_tf
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : Optional[int] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase : Dict = text_generator("This is a test" , do_sample=__a )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_UpperCamelCase : Optional[int] = text_generator(["This is a test", "This is a second test"] , do_sample=__a )
self.assertEqual(
__a , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str] , __a : Tuple , __a : List[str] ) -> Dict:
_UpperCamelCase : Optional[Any] = TextGenerationPipeline(model=__a , tokenizer=__a )
return text_generator, ["This is a test", "Another test"]
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = "Hello I believe in"
_UpperCamelCase : List[str] = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = text_generator(__a )
self.assertEqual(
__a , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_UpperCamelCase : Union[str, Any] = text_generator(__a , stop_sequence=" fe" )
self.assertEqual(__a , [{"generated_text": "Hello I believe in fe"}] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str , __a : Any ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = text_generator.model
_UpperCamelCase : Any = text_generator.tokenizer
_UpperCamelCase : Optional[Any] = text_generator("This is a test" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCamelCase : int = text_generator("This is a test" , return_full_text=__a )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCamelCase : Union[str, Any] = pipeline(task="text-generation" , model=__a , tokenizer=__a , return_full_text=__a )
_UpperCamelCase : int = text_generator("This is a test" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCamelCase : int = text_generator("This is a test" , return_full_text=__a )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCamelCase : Optional[int] = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase : Any = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
_UpperCamelCase : Optional[int] = text_generator("test" , return_full_text=__a , return_text=__a )
with self.assertRaises(__a ):
_UpperCamelCase : Tuple = text_generator("test" , return_full_text=__a , return_tensors=__a )
with self.assertRaises(__a ):
_UpperCamelCase : int = text_generator("test" , return_text=__a , return_tensors=__a )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase : Union[str, Any] = text_generator("" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase : Union[str, Any] = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase : int = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
_UpperCamelCase : Optional[int] = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__a ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
import torch
# Classic `model_kwargs`
_UpperCamelCase : Any = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_UpperCamelCase : int = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase : Any = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase : Optional[Any] = pipe("This is a test" )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
import torch
_UpperCamelCase : List[str] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.float16 )
        pipe("This is a test" , do_sample=True , top_p=0.5 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : int = "Hello world"
_UpperCamelCase : Optional[Any] = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_UpperCamelCase : Tuple = logging.get_logger("transformers.generation.tf_utils" )
else:
_UpperCamelCase : List[str] = logging.get_logger("transformers.generation.utils" )
_UpperCamelCase : Tuple = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__a ) as cl:
_UpperCamelCase : Any = text_generator(__a , max_length=10 , max_new_tokens=1 )
self.assertIn(__a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__a ) as cl:
_UpperCamelCase : List[str] = text_generator(__a , max_new_tokens=1 )
self.assertNotIn(__a , cl.out )
with CaptureLogger(__a ) as cl:
_UpperCamelCase : Optional[Any] = text_generator(__a , max_length=10 )
self.assertNotIn(__a , cl.out )
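# ---------------------------------------------------------------------------
# Hedged usage sketch of the behaviour tested above: a text-generation
# pipeline warns when both `max_length` and `max_new_tokens` are given, and
# stays silent when only one is set. The tiny checkpoint name is taken from
# the test itself.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    print(generator("Hello world", max_new_tokens=1)[0]["generated_text"])  # no warning
    generator("Hello world", max_length=10, max_new_tokens=1)  # logs "Both `max_new_tokens` ..."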
| 310
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , ToolTesterMixin ):
    '''simple docstring'''

    def setup( self : List[str] ) -> Union[str, Any]:
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )
    # The four method names below are restored descriptively so they no longer
    # shadow one another; the upstream names may differ.
    def test_exact_match_arg( self : List[str] ) -> int:
        result = self.tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_arg_remote( self : Tuple ) -> Union[str, Any]:
        result = self.remote_tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_kwarg( self : int ) -> Optional[Any]:
        result = self.tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_kwarg_remote( self : Dict ) -> str:
        result = self.remote_tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
| 310
| 1
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "conditional_detr"
SCREAMING_SNAKE_CASE__ :Any = ["past_key_values"]
SCREAMING_SNAKE_CASE__ :Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Dict , __a : Optional[int]=True , __a : Any=None , __a : Optional[int]=3 , __a : Optional[Any]=300 , __a : str=6 , __a : Dict=2048 , __a : Any=8 , __a : Optional[int]=6 , __a : Union[str, Any]=2048 , __a : Optional[int]=8 , __a : Dict=0.0 , __a : Tuple=0.0 , __a : Optional[int]=True , __a : str="relu" , __a : Optional[Any]=256 , __a : Union[str, Any]=0.1 , __a : str=0.0 , __a : List[str]=0.0 , __a : Optional[int]=0.02 , __a : List[str]=1.0 , __a : Optional[int]=False , __a : List[Any]="sine" , __a : List[Any]="resnet50" , __a : Tuple=True , __a : Union[str, Any]=False , __a : List[Any]=2 , __a : int=5 , __a : List[Any]=2 , __a : Dict=1 , __a : int=1 , __a : List[Any]=2 , __a : Any=5 , __a : Union[str, Any]=2 , __a : str=0.25 , **__a : Tuple , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCamelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__a , __a ):
_UpperCamelCase : str = backbone_config.get("model_type" )
_UpperCamelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase : int = config_class.from_dict(__a )
_UpperCamelCase : List[Any] = use_timm_backbone
_UpperCamelCase : List[str] = backbone_config
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Union[str, Any] = num_queries
_UpperCamelCase : Any = d_model
_UpperCamelCase : Union[str, Any] = encoder_ffn_dim
_UpperCamelCase : Any = encoder_layers
_UpperCamelCase : Union[str, Any] = encoder_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_ffn_dim
_UpperCamelCase : Optional[Any] = decoder_layers
_UpperCamelCase : Any = decoder_attention_heads
_UpperCamelCase : int = dropout
_UpperCamelCase : Dict = attention_dropout
_UpperCamelCase : Optional[Any] = activation_dropout
_UpperCamelCase : Any = activation_function
_UpperCamelCase : List[str] = init_std
_UpperCamelCase : int = init_xavier_std
_UpperCamelCase : Union[str, Any] = encoder_layerdrop
_UpperCamelCase : Optional[Any] = decoder_layerdrop
_UpperCamelCase : Dict = encoder_layers
_UpperCamelCase : Dict = auxiliary_loss
_UpperCamelCase : Dict = position_embedding_type
_UpperCamelCase : Dict = backbone
_UpperCamelCase : Optional[Any] = use_pretrained_backbone
_UpperCamelCase : str = dilation
# Hungarian matcher
_UpperCamelCase : Optional[Any] = class_cost
_UpperCamelCase : str = bbox_cost
_UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
_UpperCamelCase : Dict = mask_loss_coefficient
_UpperCamelCase : Dict = dice_loss_coefficient
_UpperCamelCase : List[Any] = cls_loss_coefficient
_UpperCamelCase : Tuple = bbox_loss_coefficient
_UpperCamelCase : Any = giou_loss_coefficient
_UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.encoder_attention_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.d_model
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCamelCase : Tuple = self.backbone_config.to_dict()
_UpperCamelCase : str = self.__class__.model_type
return output
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> float:
return 1e-5
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12
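# Hedged usage sketch (assumes the upstream `transformers` export of this
# config; the class above appears under obfuscated names). The `attribute_map`
# defined above lets generic code read `hidden_size`/`num_attention_heads`
# while the config stores `d_model`/`encoder_attention_heads`:
if __name__ == "__main__":
    from transformers import ConditionalDetrConfig

    cfg = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
    assert cfg.hidden_size == 256
    assert cfg.num_attention_heads == 8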
| 310
|
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent) -> bool:
    """Breadth-first search for an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink) -> list[tuple[int, int]]:
    """Run Ford-Fulkerson with BFS path search, then return the saturated edges.

    >>> mincut(test_graph, source=0, sink=5)
    [(1, 3), (4, 3), (4, 5)]
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
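# Note (hedged): for the sample capacity matrix above, BFS-based augmentation
# (Edmonds-Karp) reaches a maximum flow of 23, and the saturated edges reported
# here coincide with the minimum cut: [(1, 3), (4, 3), (4, 5)]. In general the
# saturated-edge set can be a superset of the true min-cut.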
| 310
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = torch.device("cpu")
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ) -> Optional[Any]:
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key( dct , old , new ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val


def create_rename_keys( state_dict ):
    """simple docstring"""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv" , ".point_wise_conv" )
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv" , ".depth_wise_conv" )
        if ".Proj." in k:
            k_new = k_new.replace(".Proj." , ".proj." )
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
        if "network" in k_new:
            ls = k_new.split("." )
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
            else:
                k_new = k_new.replace("network" , "swiftformer.encoder.network" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> None:
"""simple docstring"""
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="cpu" , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location="cpu" )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
# load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config" )
    inputs = processor(images=image , return_tensors="pt" )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["pixel_values"] ).logits
    assert hf_logits.shape == torch.Size([1, 1_000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
lowerCamelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 310
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
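# Hedged shape sketch for the helper above (shown here with an obfuscated
# name; upstream it is called `make_batched`): it normalises any accepted
# input into a batch of videos, i.e. a list of lists of frames:
#   single image            -> [[image]]        (one video, one frame)
#   list of images          -> [list_of_images] (one video)
#   list of lists of images -> unchanged        (already a batch of videos)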
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : str = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : int = crop_size
_UpperCamelCase : Optional[Any] = resample
_UpperCamelCase : Dict = do_rescale
_UpperCamelCase : Any = rescale_factor
_UpperCamelCase : Any = offset
_UpperCamelCase : Union[str, Any] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
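# Hedged usage sketch for the video processor above (the API mirrors the
# upstream VivitImageProcessor-style `preprocess`; names are illustrative):
#
#   import numpy as np
#   frames = [np.zeros((256, 256, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(frames, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 8, 3, crop_height, crop_width)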
| 310
| 1
|
"""simple docstring"""
def lowercase__ ( number: int ) -> bool:
    """Return True when `number` is even, i.e. its lowest bit is 0.

    >>> lowercase__(4)
    True
    >>> lowercase__(7)
    False
    """
    return number & 1 == 0

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 310
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def lowercase__ ( objs=OBJECTS , attrs=ATTRIBUTES ) -> Union[str, Any]:
    """simple docstring"""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
            assert isinstance(lowercase_ , torch.Tensor ), type(lowercase_ )
_UpperCamelCase : Optional[Any] = v
return r
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device )
_UpperCamelCase : str = in_tensor.numpy()
_UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( url_or_filename ) -> bool:
    """simple docstring"""
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ) -> str:
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
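# Examples of the two URL layouts produced above (derived directly from the
# string formatting; the model ids are illustrative):
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin")
#     -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"   (legacy: no "/")
#   hf_bucket_url("user/model", "pytorch_model.bin", use_cdn=False)
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/pytorch_model.bin"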
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ ,lowercase_ ):
ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ ,lowercase_ ):
ua += "; " + user_agent
_UpperCamelCase : Any = {"user-agent": ua}
if resume_size > 0:
_UpperCamelCase : str = "bytes=%d-" % (resume_size,)
_UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
_UpperCamelCase : List[str] = response.headers.get("Content-Length" )
_UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None
_UpperCamelCase : Optional[int] = tqdm(
unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,)
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
def url_to_filename( url , etag=None ) -> str:
    """simple docstring"""
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
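# Hedged sketch of the cache-naming scheme above: filenames are opaque
# digests, so the same URL plus ETag always maps to the same cache entry, e.g.
#   url_to_filename("https://example.com/model.bin", etag='"abc"')
#     -> "<sha256(url) hex>.<sha256(etag) hex>"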
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
            data = eval(f.read() )
else:
        req = requests.get(lowercase_ )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split("\n" )
req.close()
return data
def get_image_from_url( url ) -> Optional[int]:
    """simple docstring"""
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
def lowercase__ ( ) -> Dict:
"""simple docstring"""
    print(F'''{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( images , batch=1 ) -> List[Any]:
    """simple docstring"""
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
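# Hedged sketch: the generator above yields fixed-size slices of a sequence,
# e.g. with batch=2 the list [1, 2, 3, 4, 5] comes out as [1, 2], [3, 4], [5].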
| 310
| 1
|
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase__ = datasets.logging.get_logger(__name__)
lowerCamelCase__ = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowerCamelCase__ = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowerCamelCase__ = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Union[str, Any] ) -> Tuple:
if self.config_name == "default":
_UpperCamelCase : List[str] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
_UpperCamelCase : Optional[Any] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[int] , __a : List[Any] , __a : Tuple , __a : int=None , __a : Dict=False ) -> Optional[Any]:
if gpus is None:
_UpperCamelCase : Tuple = 1 if torch.cuda.is_available() else 0
_UpperCamelCase : Optional[Any] = {"src": sources, "mt": predictions, "ref": references}
_UpperCamelCase : Dict = [dict(zip(__a , __a ) ) for t in zip(*data.values() )]
_UpperCamelCase, _UpperCamelCase : List[str] = self.scorer.predict(__a , gpus=__a , progress_bar=__a )
return {"mean_score": mean_score, "scores": scores}
| 310
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__a , self ).__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
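# Hedged shape sketch for the forward pass above (following the FSNER
# formulation): `q[i]` holds the query-token embeddings for support set i,
# `s_start`/`s_end` hold support-token embeddings at the start/end entity
# markers, and matmul + softmax turn their similarities into per-token
# probabilities that a query token opens or closes an entity span.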
| 310
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : Optional[Any] , **__a : Any ) -> Optional[int]:
pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : Tuple , __a : Union[str, Any] ) -> Dict:
_UpperCamelCase : Optional[int] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
_UpperCamelCase : Optional[Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[Any] , __a : List[Any] ) -> Tuple:
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
{
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
}
                for i in range(n )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
_UpperCamelCase : Any = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
_UpperCamelCase : Optional[Any] = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
_UpperCamelCase : Dict = pipeline("zero-shot-object-detection" )
_UpperCamelCase : int = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
_UpperCamelCase : Dict = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
pass
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : Dict = 0.2
_UpperCamelCase : Any = pipeline("zero-shot-object-detection" )
_UpperCamelCase : Any = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=__a , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : int = 2
_UpperCamelCase : int = pipeline("zero-shot-object-detection" )
_UpperCamelCase : Dict = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=__a , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
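# ---------------------------------------------------------------------------
# Hedged usage sketch for the pipeline exercised above (public `transformers`
# API; the image URL and candidate labels are taken from the tests themselves):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection")
    detections = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
        top_k=5,
    )
    for det in detections:
        print(det["score"], det["label"], det["box"])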
| 310
|
"""simple docstring"""
from typing import Any
def lowercase__ ( input_list: list ) -> list[Any]:
    """Return every value that occurs most often in `input_list`, sorted.

    >>> lowercase__([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2])
    [2]
    >>> lowercase__([])
    []
    """
    if not input_list:
        return []
    counts = [input_list.count(value ) for value in input_list]
    y = max(counts )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
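        # Worked example with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 1 = 226.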
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
        _UpperCamelCase : str = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
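        # Worked example for this test: dino-vits8 has patch size 8, and the image
        # is processed at size 480, so interpolation yields (480 // 8) ** 2 = 3600
        # patch positions plus the [CLS] token, i.e. 3601, which matches the
        # (1, 3601, 384) hidden-state shape asserted below.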
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
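# Illustrative construction sketch. In the upstream transformers API the
# classmethod above is `RagConfig.from_question_encoder_generator_configs`; the
# model names below are examples only:
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config)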
| 310
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase__ = "hf-internal-testing/tiny-random-bert"
lowerCamelCase__ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowerCamelCase__ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Any = cached_file(__a , __a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__a , __a ) ) )
with open(os.path.join(__a , "refs" , "main" ) ) as f:
_UpperCamelCase : Dict = f.read()
self.assertEqual(__a , os.path.join(__a , "snapshots" , __a , __a ) )
self.assertTrue(os.path.isfile(__a ) )
# File is cached at the same place the second time.
_UpperCamelCase : Tuple = cached_file(__a , __a )
self.assertEqual(__a , __a )
# Using a specific revision to test the full commit hash.
_UpperCamelCase : Any = cached_file(__a , __a , revision="9b8c223" )
self.assertEqual(__a , os.path.join(__a , "snapshots" , __a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
with self.assertRaisesRegex(__a , "is not a valid model identifier" ):
_UpperCamelCase : Tuple = cached_file("tiny-random-bert" , __a )
with self.assertRaisesRegex(__a , "is not a valid git identifier" ):
_UpperCamelCase : int = cached_file(__a , __a , revision="aaaa" )
with self.assertRaisesRegex(__a , "does not appear to have a file named" ):
_UpperCamelCase : Tuple = cached_file(__a , "conf" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
with self.assertRaisesRegex(__a , "does not appear to have a file named" ):
_UpperCamelCase : List[Any] = cached_file(__a , "conf" )
with open(os.path.join(__a , "refs" , "main" ) ) as f:
_UpperCamelCase : Optional[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__a , ".no_exist" , __a , "conf" ) ) )
_UpperCamelCase : List[Any] = cached_file(__a , "conf" , _raise_exceptions_for_missing_entries=__a )
self.assertIsNone(__a )
_UpperCamelCase : int = cached_file(__a , "conf" , local_files_only=__a , _raise_exceptions_for_missing_entries=__a )
self.assertIsNone(__a )
_UpperCamelCase : Any = mock.Mock()
_UpperCamelCase : Dict = 500
_UpperCamelCase : str = {}
_UpperCamelCase : Optional[Any] = HTTPError
_UpperCamelCase : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__a ) as mock_head:
_UpperCamelCase : Union[str, Any] = cached_file(__a , "conf" , _raise_exceptions_for_connection_errors=__a )
self.assertIsNone(__a )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , __a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , __a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__a , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , __a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__a , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , __a , revision="ahaha" )
_UpperCamelCase : Tuple = get_file_from_repo("bert-base-cased" , __a )
# The name is the cached name which is not very easy to test, so instead we load the content.
_UpperCamelCase : List[Any] = json.loads(open(__a , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : Union[str, Any] = Path(__a ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(__a , "a.txt" ) , str(__a ) )
self.assertIsNone(get_file_from_repo(__a , "b.txt" ) )
| 310
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
        _UpperCamelCase : str = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 1
|
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , **__a : int ) -> List[Any]:
super().__init__(**__a )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : List[Any] , __a : Union[np.ndarray, bytes, str] , **__a : Tuple ) -> Any:
return super().__call__(__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__a : Union[str, Any] ) -> Dict:
_UpperCamelCase : Tuple = {}
if "candidate_labels" in kwargs:
_UpperCamelCase : Optional[int] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
_UpperCamelCase : Optional[int] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Any , __a : str=None , __a : Dict="This is a sound of {}." ) -> Tuple:
if isinstance(__a , __a ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCamelCase : Optional[Any] = requests.get(__a ).content
else:
with open(__a , "rb" ) as f:
_UpperCamelCase : Tuple = f.read()
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = ffmpeg_read(__a , self.feature_extractor.sampling_rate )
if not isinstance(__a , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
_UpperCamelCase : Union[str, Any] = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
_UpperCamelCase : Tuple = candidate_labels
_UpperCamelCase : int = [hypothesis_template.format(__a ) for x in candidate_labels]
_UpperCamelCase : Optional[Any] = self.tokenizer(__a , return_tensors=self.framework , padding=__a )
_UpperCamelCase : Optional[int] = [text_inputs]
return inputs
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : int ) -> List[Any]:
_UpperCamelCase : Optional[int] = model_inputs.pop("candidate_labels" )
_UpperCamelCase : str = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , __a ):
_UpperCamelCase : List[Any] = text_inputs[0]
else:
# Batching case.
_UpperCamelCase : int = text_inputs[0][0]
_UpperCamelCase : Union[str, Any] = self.model(**__a , **__a )
_UpperCamelCase : Dict = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = model_outputs.pop("candidate_labels" )
_UpperCamelCase : int = model_outputs["logits"][0]
if self.framework == "pt":
_UpperCamelCase : Optional[Any] = logits.softmax(dim=0 )
_UpperCamelCase : Dict = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
_UpperCamelCase : Tuple = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(__a , __a ) , key=lambda x : -x[0] )
]
return result
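# Minimal usage sketch. The checkpoint name is an assumption for illustration (a
# CLAP-style model with both an audio and a text tower); nothing in this file
# pins it down:
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier(
#         "dog_bark.wav",
#         candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
#     )
#     # -> [{"score": ..., "label": ...}, ...], sorted by descending score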
| 310
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
        _UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.001 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
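# Application-side consumption sketch (names are placeholders; the pattern mirrors
# the threaded tests above):
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
#     thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#     thread.start()
#     for chunk in streamer:
#         print(chunk, end="", flush=True)  # render text as soon as it is generated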
| 310
| 1
|
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , _UpperCamelCase , )
| 310
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
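            # In LUKE's entity-aware self-attention, these three copies become the
            # word-to-entity, entity-to-word and entity-to-entity query projections,
            # each initialized from the pretrained word-to-word query weights.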
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 1
|
"""simple docstring"""
lowerCamelCase__ = "Input must be a string of 8 numbers plus letter"
lowerCamelCase__ = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Optional[Any] = F'''Expected string as input, found {type(lowercase_ ).__name__}'''
raise TypeError(lowercase_ )
_UpperCamelCase : int = spanish_id.replace("-" ,"" ).upper()
if len(lowercase_ ) != 9:
raise ValueError(lowercase_ )
try:
_UpperCamelCase : Optional[Any] = int(spanish_id_clean[0:8] )
_UpperCamelCase : Optional[int] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase_ ) from ex
if letter.isdigit():
raise ValueError(lowercase_ )
return letter == LOOKUP_LETTERS[number % 23]
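# Worked example (made-up ID): for "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so the check letter matches and the ID is valid.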
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        # `corpus_gleu` takes the references first and the hypotheses second.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 310
| 1
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
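

# A minimal, self-contained sanity check for the helper above -- an illustrative
# sketch, not part of the original module; it relies only on the `torch` import
# already present in this file.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(1000, alpha_transform_type="cosine")
    assert _betas.shape == (1000,)
    assert _betas.max().item() <= 0.999  # capped by max_beta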
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE__ :int = 2
@register_to_config
def __init__( self : Optional[int] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : str = "linspace" , __a : int = 0 , ) -> str:
if trained_betas is not None:
_UpperCamelCase : List[str] = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCamelCase : Any = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCamelCase : str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCamelCase : Optional[int] = betas_for_alpha_bar(__a )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_UpperCamelCase : int = 1.0 - self.betas
_UpperCamelCase : List[str] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[Any] , __a : str=None ) -> Tuple:
if schedule_timesteps is None:
_UpperCamelCase : Union[str, Any] = self.timesteps
_UpperCamelCase : Optional[int] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_UpperCamelCase : Any = 1 if len(__a ) > 1 else 0
else:
_UpperCamelCase : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
_UpperCamelCase : Union[str, Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
_UpperCamelCase : Optional[int] = self.index_for_timestep(__a )
if self.state_in_first_order:
_UpperCamelCase : str = self.sigmas[step_index]
else:
_UpperCamelCase : int = self.sigmas_interpol[step_index]
_UpperCamelCase : Any = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ) -> str:
_UpperCamelCase : Union[str, Any] = num_inference_steps
_UpperCamelCase : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_UpperCamelCase : Dict = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_UpperCamelCase : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCamelCase : Tuple = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_UpperCamelCase : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCamelCase : Optional[int] = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_UpperCamelCase : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_UpperCamelCase : Optional[int] = torch.from_numpy(np.log(__a ) ).to(__a )
_UpperCamelCase : Dict = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
_UpperCamelCase : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_UpperCamelCase : Union[str, Any] = torch.from_numpy(__a ).to(device=__a )
# interpolate sigmas
_UpperCamelCase : Any = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_UpperCamelCase : List[str] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_UpperCamelCase : List[str] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__a ).startswith("mps" ):
# mps does not support float64
_UpperCamelCase : List[Any] = torch.from_numpy(__a ).to(__a , dtype=torch.floataa )
else:
_UpperCamelCase : str = torch.from_numpy(__a ).to(__a )
# interpolate timesteps
_UpperCamelCase : Optional[Any] = self.sigma_to_t(__a ).to(__a , dtype=timesteps.dtype )
_UpperCamelCase : List[str] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_UpperCamelCase : int = torch.cat([timesteps[:1], interleaved_timesteps] )
_UpperCamelCase : str = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_UpperCamelCase : Any = defaultdict(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Dict ) -> int:
# get log sigma
_UpperCamelCase : List[Any] = sigma.log()
# get distribution
_UpperCamelCase : Tuple = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_UpperCamelCase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_UpperCamelCase : Any = low_idx + 1
_UpperCamelCase : List[Any] = self.log_sigmas[low_idx]
_UpperCamelCase : Dict = self.log_sigmas[high_idx]
# interpolate sigmas
_UpperCamelCase : List[str] = (low - log_sigma) / (low - high)
_UpperCamelCase : Dict = w.clamp(0 , 1 )
# transform interpolation to time range
_UpperCamelCase : Any = (1 - w) * low_idx + w * high_idx
_UpperCamelCase : Optional[int] = t.view(sigma.shape )
return t
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.sample is None
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ) -> Union[SchedulerOutput, Tuple]:
_UpperCamelCase : List[str] = self.index_for_timestep(__a )
# advance index counter by 1
_UpperCamelCase : List[Any] = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_UpperCamelCase : int = self.sigmas[step_index]
_UpperCamelCase : List[str] = self.sigmas_interpol[step_index + 1]
_UpperCamelCase : List[str] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_UpperCamelCase : Optional[int] = self.sigmas[step_index - 1]
_UpperCamelCase : Union[str, Any] = self.sigmas_interpol[step_index]
_UpperCamelCase : Union[str, Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_UpperCamelCase : Tuple = 0
_UpperCamelCase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_UpperCamelCase : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_UpperCamelCase : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_UpperCamelCase : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_UpperCamelCase : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_UpperCamelCase : str = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_UpperCamelCase : List[str] = sigma_interpol - sigma_hat
# store for 2nd order step
_UpperCamelCase : Optional[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_UpperCamelCase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_UpperCamelCase : List[str] = sigma_next - sigma_hat
_UpperCamelCase : Optional[int] = self.sample
_UpperCamelCase : List[str] = None
_UpperCamelCase : Optional[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_UpperCamelCase : Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
_UpperCamelCase : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_UpperCamelCase : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_UpperCamelCase : Optional[Any] = self.timesteps.to(original_samples.device )
_UpperCamelCase : List[Any] = timesteps.to(original_samples.device )
_UpperCamelCase : str = [self.index_for_timestep(__a , __a ) for t in timesteps]
_UpperCamelCase : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_UpperCamelCase : str = sigma.unsqueeze(-1 )
_UpperCamelCase : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[Any] ) -> Union[str, Any]:
return self.config.num_train_timesteps
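

# Sketch of the standard diffusers denoising loop that the methods above
# implement (`set_timesteps` / `scale_model_input` / `step` and the
# `init_noise_sigma` property); `unet` is a hypothetical noise-prediction
# model, so this stays illustrative rather than runnable:
#
#   scheduler.set_timesteps(num_inference_steps=50, device="cuda")
#   sample = torch.randn(sample_shape, device="cuda") * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample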
| 310
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency, or inductance from the other two
    values; exactly one of the three arguments must be 0 and is solved for.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
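    # Worked example (illustrative, not part of the original module): a 35 mH
    # inductor at 50 Hz has reactance X_L = 2 * pi * f * L ~= 11.0 ohm.
    print(ind_reactance(35e-3, 50, 0))  # {'reactance': 10.995574287564276}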
| 310
| 1
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head on the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """Average the start-token, end-token and pooled-category cross-entropy losses."""

    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "google/bigbird-roberta-base"
SCREAMING_SNAKE_CASE__ :int = 3_000
SCREAMING_SNAKE_CASE__ :int = 10_500
SCREAMING_SNAKE_CASE__ :int = 128
SCREAMING_SNAKE_CASE__ :int = 3
SCREAMING_SNAKE_CASE__ :int = 1
SCREAMING_SNAKE_CASE__ :int = 5
# tx_args
SCREAMING_SNAKE_CASE__ :float = 3e-5
SCREAMING_SNAKE_CASE__ :float = 0.0
SCREAMING_SNAKE_CASE__ :int = 20_000
SCREAMING_SNAKE_CASE__ :float = 0.0_095
SCREAMING_SNAKE_CASE__ :str = "bigbird-roberta-natural-questions"
SCREAMING_SNAKE_CASE__ :str = "training-expt"
SCREAMING_SNAKE_CASE__ :str = "data/nq-training.jsonl"
SCREAMING_SNAKE_CASE__ :str = "data/nq-validation.jsonl"
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
os.makedirs(self.base_dir , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(self.base_dir , self.save_dir )
_UpperCamelCase : Union[str, Any] = self.batch_size_per_device * jax.device_count()
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int
SCREAMING_SNAKE_CASE__ :int = 4_096 # no dynamic padding on TPUs
def __call__( self : Optional[int] , __a : Any ) -> Tuple:
_UpperCamelCase : str = self.collate_fn(__a )
_UpperCamelCase : int = jax.tree_util.tree_map(__a , __a )
return batch
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Union[str, Any] ) -> Dict:
_UpperCamelCase, _UpperCamelCase : Tuple = self.fetch_inputs(features["input_ids"] )
_UpperCamelCase : List[str] = {
"input_ids": jnp.array(__a , dtype=jnp.intaa ),
"attention_mask": jnp.array(__a , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list ) -> str:
_UpperCamelCase : Union[str, Any] = [self._fetch_inputs(__a ) for ids in input_ids]
return zip(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : list ) -> Optional[Any]:
_UpperCamelCase : int = [1 for _ in range(len(__a ) )]
while len(__a ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield successive `batch_size`-sized dicts from `dataset`, optionally shuffled."""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap ,axis_name="batch" )
def lowercase__ ( lowercase_ ,lowercase_ ,**lowercase_ ) -> Any:
"""simple docstring"""
def loss_fn(lowercase_ ):
_UpperCamelCase : List[Any] = model_inputs.pop("start_labels" )
_UpperCamelCase : Optional[int] = model_inputs.pop("end_labels" )
_UpperCamelCase : List[Any] = model_inputs.pop("pooled_labels" )
_UpperCamelCase : List[Any] = state.apply_fn(**lowercase_ ,params=lowercase_ ,dropout_rng=lowercase_ ,train=lowercase_ )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = outputs
return state.loss_fn(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,)
_UpperCamelCase, _UpperCamelCase : Dict = jax.random.split(lowercase_ )
_UpperCamelCase : List[str] = jax.value_and_grad(lowercase_ )
_UpperCamelCase, _UpperCamelCase : List[Any] = grad_fn(state.params )
_UpperCamelCase : Tuple = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
_UpperCamelCase : Tuple = jax.lax.pmean(lowercase_ ,"batch" )
_UpperCamelCase : List[Any] = state.apply_gradients(grads=lowercase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name="batch" )
def lowercase__ ( lowercase_ ,**lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = model_inputs.pop("start_labels" )
_UpperCamelCase : List[str] = model_inputs.pop("end_labels" )
_UpperCamelCase : Optional[Any] = model_inputs.pop("pooled_labels" )
_UpperCamelCase : Optional[int] = state.apply_fn(**lowercase_ ,params=state.params ,train=lowercase_ )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = outputs
_UpperCamelCase : List[Any] = state.loss_fn(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Union[str, Any] = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
return metrics
class __SCREAMING_SNAKE_CASE ( train_state.TrainState ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Callable = struct.field(pytree_node=_UpperCamelCase )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Args
SCREAMING_SNAKE_CASE__ :Callable
SCREAMING_SNAKE_CASE__ :Callable
SCREAMING_SNAKE_CASE__ :Callable
SCREAMING_SNAKE_CASE__ :Callable
SCREAMING_SNAKE_CASE__ :wandb
SCREAMING_SNAKE_CASE__ :Callable = None
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, Any] , __a : List[str] , __a : Tuple , __a : Any=None ) -> List[Any]:
_UpperCamelCase : Any = model.params
_UpperCamelCase : Tuple = TrainState.create(
apply_fn=model.__call__ , params=__a , tx=__a , loss_fn=__a , )
if ckpt_dir is not None:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = restore_checkpoint(__a , __a )
_UpperCamelCase : int = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
_UpperCamelCase, _UpperCamelCase : str = build_tx(**__a )
_UpperCamelCase : List[str] = train_state.TrainState(
step=__a , apply_fn=model.__call__ , params=__a , tx=__a , opt_state=__a , )
_UpperCamelCase : Any = args
_UpperCamelCase : Optional[Any] = data_collator
_UpperCamelCase : Union[str, Any] = lr
_UpperCamelCase : Optional[Any] = params
_UpperCamelCase : Optional[Any] = jax_utils.replicate(__a )
return state
def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[Any] , __a : List[str] , __a : str ) -> Tuple:
_UpperCamelCase : List[Any] = self.args
_UpperCamelCase : List[str] = len(__a ) // args.batch_size
_UpperCamelCase : Optional[int] = jax.random.PRNGKey(0 )
_UpperCamelCase : int = jax.random.split(__a , jax.device_count() )
for epoch in range(args.max_epochs ):
_UpperCamelCase : Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
_UpperCamelCase : int = get_batched_dataset(__a , args.batch_size , seed=__a )
_UpperCamelCase : List[str] = 0
for batch in tqdm(__a , total=__a , desc=F'''Running EPOCH-{epoch}''' ):
_UpperCamelCase : List[Any] = self.data_collator(__a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = self.train_step_fn(__a , __a , **__a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
_UpperCamelCase : Union[str, Any] = jax_utils.unreplicate(state.step )
_UpperCamelCase : List[str] = running_loss.item() / i
_UpperCamelCase : Any = self.scheduler_fn(state_step - 1 )
_UpperCamelCase : List[str] = self.evaluate(__a , __a )
_UpperCamelCase : Optional[int] = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__a ) )
self.logger.log(__a , commit=__a )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : str , __a : Optional[int] ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = get_batched_dataset(__a , self.args.batch_size )
_UpperCamelCase : List[Any] = len(__a ) // self.args.batch_size
_UpperCamelCase : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa )
_UpperCamelCase : Optional[Any] = 0
for batch in tqdm(__a , total=__a , desc="Evaluating ... " ):
_UpperCamelCase : Tuple = self.data_collator(__a )
_UpperCamelCase : str = self.val_step_fn(__a , **__a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : int , __a : str ) -> str:
_UpperCamelCase : int = jax_utils.unreplicate(__a )
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... " )
self.model_save_fn(__a , params=state.params )
with open(os.path.join(__a , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__a , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(__a , "data_collator.joblib" ) )
with open(os.path.join(__a , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , __a )
print("DONE" )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay down to 1e-7."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build an AdamW optimizer that skips weight decay on biases and LayerNorm scales."""

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
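

# Illustrative wiring of the helpers above (not in the original script): build
# the optimizer with the tx_args defaults defined earlier and probe the warmup
# schedule; the total step count is a hypothetical value.
if __name__ == "__main__":
    tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20_000, num_train_steps=100_000, weight_decay=0.0095)
    print(lr(0), lr(20_000))  # 0.0 at step 0; peaks at 3e-5 once warmup ends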
| 310
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Move `src` to `dst` on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the references to fsspec's event loop and IO thread."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
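

# Quick illustrative checks for the path helper above (not part of the
# original module):
if __name__ == "__main__":
    assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
    assert extract_path_from_uri("relative/path") == "relative/path"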
| 310
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
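

# Illustrative usage (not in the original file): encode one variable-language
# translation example with the dataclass defined above.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    print(encoded)  # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}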
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 1
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Return a match when a non-binary `open(...)` call lacks an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first bare `print(` call, ignoring prints inside comments and strings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 310
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
webbrowser.open(link)
| 310
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
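

# Quick illustrative check of the validation above (not in the original file):
# a well-formed `rope_scaling` dict needs exactly a `type` and a float `factor` > 1.
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}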
| 310
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 310
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_UpperCamelCase : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[: config.hidden_size, :]
_UpperCamelCase : int = in_proj_bias[: config.hidden_size]
_UpperCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase : Union[str, Any] = in_proj_weight[-config.hidden_size :, :]
_UpperCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map a timm-style parameter name onto the 🤗 Transformers naming scheme."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
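

# Example mapping (illustrative): a timm-style parameter name and its
# HuggingFace counterpart as produced by `rename_key`:
#   rename_key("backbone.blocks.0.attn.proj.weight")
#   -> "vit.encoder.layer.0.attention.output.dense.weight"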
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv matrix into separate query, key and value projections
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = False ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = get_yolos_config(lowercase_ )
# load original state_dict
_UpperCamelCase : Dict = torch.load(lowercase_ ,map_location="cpu" )["model"]
# load 🤗 model
_UpperCamelCase : int = YolosForObjectDetection(lowercase_ )
model.eval()
_UpperCamelCase : Optional[Any] = convert_state_dict(lowercase_ ,lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by YolosImageProcessor
_UpperCamelCase : Tuple = 800 if yolos_name != "yolos_ti" else 512
_UpperCamelCase : Tuple = YolosImageProcessor(format="coco_detection" ,size=lowercase_ )
_UpperCamelCase : Union[str, Any] = image_processor(images=prepare_img() ,return_tensors="pt" )
_UpperCamelCase : Dict = model(**lowercase_ )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.logits, outputs.pred_boxes
_UpperCamelCase, _UpperCamelCase : Optional[int] = None, None
if yolos_name == "yolos_ti":
_UpperCamelCase : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
_UpperCamelCase : int = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
_UpperCamelCase : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
_UpperCamelCase : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
_UpperCamelCase : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
_UpperCamelCase : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
_UpperCamelCase : Dict = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
_UpperCamelCase : Optional[int] = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
_UpperCamelCase : Any = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
_UpperCamelCase : Tuple = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] ,lowercase_ ,atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] ,lowercase_ ,atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
_UpperCamelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
_UpperCamelCase : List[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(lowercase_ ,organization="hustvl" )
model.push_to_hub(lowercase_ ,organization="hustvl" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 310
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Placeholder so the tests can be collected when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
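
# Minimal standalone sketch (an addition, not part of the test suite), assuming
# this module's existing `pipeline` import: it mirrors the slow tests above and,
# when run directly, downloads facebook/detr-resnet-50, so it needs network
# access and a PyTorch install.
if __name__ == "__main__":
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for detection in detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9985
    ):
        # Each detection is a dict with "score", "label" and "box" keys, as
        # the assertions above show.
        print(detection["label"], round(detection["score"], 4), detection["box"])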
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = []
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
for i in range(len(lowercase_ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowercase_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowercase_ ,-1 ,-1 ) ,range(lowercase_ ,-1 ,-1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowercase_ ,-1 ,-1 ) ,range(lowercase_ ,len(lowercase_ ) ) ):
if board[i][j] == 1:
return False
return True
def lowercase__ ( lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
if row >= len(lowercase_ ):
solution.append(lowercase_ )
printboard(lowercase_ )
print()
return True
for i in range(len(lowercase_ ) ):
if is_safe(lowercase_ ,lowercase_ ,lowercase_ ):
_UpperCamelCase : Optional[int] = 1
solve(lowercase_ ,row + 1 )
_UpperCamelCase : Dict = 0
return False
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
for i in range(len(lowercase_ ) ):
for j in range(len(lowercase_ ) ):
if board[i][j] == 1:
print("Q" ,end=" " )
else:
print("." ,end=" " )
print()
# n=int(input("The no. of queens"))
lowerCamelCase__ = 8
lowerCamelCase__ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=__a ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")