| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            # project the LayerNorm-ed second-to-last hidden state instead of the final one
            sequence_output = self.pre_LN(outputs["hidden_states"][-2])
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
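# Usage sketch (not from the original file): tiny illustrative config values and random
# input ids; real AltDiffusion-style checkpoints ship their own configuration.
import torch

config = RobertaSeriesConfig(vocab_size=1000, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, project_dim=16)
model = RobertaSeriesModelWithTransformation(config)
input_ids = torch.randint(0, config.vocab_size, (1, 8))
out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
print(out.projection_state.shape)  # torch.Size([1, 8, 16]): one projected vector per token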
| 76 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
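# Illustrative check that the shim behaves exactly like the new class while emitting the
# deprecation warning on construction.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = SegformerFeatureExtractor()  # still a fully functional SegformerImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)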
| 178 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
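# Illustrative concrete subcommand built on the abstract base above; the "env" command
# name and its print-out are made up for the sketch.
from argparse import ArgumentParser


class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        env_parser = parser.add_parser("env")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("collecting environment info...")


cli_parser = ArgumentParser("cli")
subcommands = cli_parser.add_subparsers()
EnvCommand.register_subcommand(subcommands)
args = cli_parser.parse_args(["env"])
args.func(args).run()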
| 183 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # add noise to the clean image so denoising can start from a partially noised latent
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents

    @torch.no_grad()
    def __call__(self, image=None, strength=0.8, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # predict the noise residual
            model_output = self.unet(image, t).sample
            # predict previous mean of image x_t -> x_t-1 and add variance depending on eta;
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
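# Usage sketch for the pipeline above. The checkpoint name and repo layout are only an
# example of an unconditional 256x256 diffusion repo; any UNet2DModel/scheduler pair with
# a matching resolution should work the same way (the constructor converts the scheduler
# to DDIM itself).
import PIL.Image
import torch
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler).to("cuda")

init_image = PIL.Image.open("face.png")  # placeholder input image path
# strength controls how far the input is pushed toward pure noise before denoising
images, noising_timestep = pipe(image=init_image, strength=0.6, num_inference_steps=50, return_dict=False)
images[0].save("renoised.png")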
| 183 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
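# Stripped-down sketch of the lazy-import idea used above, independent of the
# transformers `_LazyModule` helper. A real package would install an instance of this as
# `sys.modules[__name__]`; the relative import only resolves inside an actual package.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value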
| 47 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
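# Quick illustrative check of how the constructor above parses `feed_forward_proj`.
config = UMT5Config()  # default feed_forward_proj="gated-gelu"
print(config.is_gated_act)  # True: the "gated-" prefix was detected
print(config.dense_act_fn)  # "gelu_new": special-cased for "gated-gelu"
print(UMT5Config(feed_forward_proj="relu").dense_act_fn)  # "relu"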
| 48 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowerCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowerCAmelCase = {"""unk_token""": """<unk>"""}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowercase ) )
_lowerCAmelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_lowerCAmelCase = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowercase , _lowercase )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def _lowercase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowercase )
self.assertIsInstance(processor_fast.tokenizer , _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowercase )
self.assertIsInstance(processor_fast.image_processor , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
_lowerCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(_lowercase , return_tensors="""np""" )
_lowerCAmelCase = processor(images=_lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = processor(text=_lowercase )
_lowerCAmelCase = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(_lowercase )
_lowerCAmelCase = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
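# What these tests exercise, distilled into real usage: CLIPProcessor routes text to the
# tokenizer and images to the image processor, then merges the outputs into one batch.
import numpy as np
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = np.random.randint(0, 255, size=(224, 224, 3), dtype=np.uint8)
inputs = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']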
| 713 |
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """A single-layer classification head (e.g. for a PPLM-style attribute discriminator)."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
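# Minimal sketch of how such a head is typically used: mean-pool a sequence of hidden
# states, then classify. The sizes are illustrative.
import torch

head = ClassificationHead(class_size=4, embed_size=768)
hidden_states = torch.randn(2, 10, 768)  # (batch, seq_len, embed_size)
logits = head(hidden_states.mean(dim=1))  # pool to (batch, embed_size) -> (batch, class_size)
print(logits.shape)  # torch.Size([2, 4])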
| 162 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
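# Worked example: 10403 = 101 * 103. With the default seed and step the function usually
# finds a factor within the allotted attempts, but success is not guaranteed per input.
factor = pollard_rho(10403)
if factor is not None:
    print(factor, 10403 // factor)  # e.g. "101 103" (order depends on which factor is hit)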
| 96 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 316 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: unfold frames from the batch axis so attention runs across frames per spatial location
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: undo the reshapes and add the residual connection
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
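# Shape walk-through with illustrative sizes: the module attends across frames at each
# spatial location and returns a tensor shaped like its input.
import torch

model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=32, norm_num_groups=8)
batch, frames = 2, 4
x = torch.randn(batch * frames, 32, 16, 16)  # frames are folded into the batch axis
out = model(x, num_frames=frames).sample
print(out.shape)  # torch.Size([8, 32, 16, 16]) -- same as the input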
| 556 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        # dense retriever: embeds questions so they can be matched against passage embeddings
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        # answer generator: seq2seq model fed "question: ... context: ..." strings
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is a module-level global set by the UI code below before this is called
    return (answer, support_list)
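# Distilled from make_support/find_nearest_training above: the dense-retrieval step on
# its own, with random vectors standing in for real question/passage embeddings.
import faiss
import numpy as np

dim = 128
passage_reps = np.random.rand(1000, dim).astype("float32")  # stand-in passage embeddings
index = faiss.IndexFlatIP(dim)  # max inner-product search, as in the loaders above
index.add(passage_reps)

q_rep = np.random.rand(1, dim).astype("float32")  # stand-in question embedding
scores, ids = index.search(q_rep, 10)  # top-10 passage ids for the question
print(ids[0])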
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
SCREAMING_SNAKE_CASE = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
SCREAMING_SNAKE_CASE = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Demo options""")
if demo_options:
SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE = action_list.index(action_st)
SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
SCREAMING_SNAKE_CASE = show_type == """Show full text of passages"""
else:
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
SCREAMING_SNAKE_CASE = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
SCREAMING_SNAKE_CASE = """wiki40b"""
SCREAMING_SNAKE_CASE = """dense"""
SCREAMING_SNAKE_CASE = """beam"""
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 64
SCREAMING_SNAKE_CASE = 256
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Generation options""")
if generate_options:
SCREAMING_SNAKE_CASE = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE = None
# start main text
SCREAMING_SNAKE_CASE = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
SCREAMING_SNAKE_CASE = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE = st.text_input("""Enter your question here:""", """""")
else:
SCREAMING_SNAKE_CASE = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""dense""", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
SCREAMING_SNAKE_CASE = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE = support_list[:10]
SCREAMING_SNAKE_CASE = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
SCREAMING_SNAKE_CASE = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE = """[{}]({})""".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE = sec_titles.split(""" & """)
SCREAMING_SNAKE_CASE = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE = find_nearest_training(question)
SCREAMING_SNAKE_CASE = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
SCREAMING_SNAKE_CASE = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
SCREAMING_SNAKE_CASE = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 556 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[int] = ["""pixel_values"""]
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase )
UpperCamelCase__ = size if size is not None else {"""shortest_edge""": 224}
UpperCamelCase__ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
UpperCamelCase__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCamelCase__ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name="""crop_size""" )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ = do_convert_rgb
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
UpperCamelCase__ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCamelCase__ = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
UpperCamelCase__ = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(__lowerCAmelCase , param_name="""size""" , default_to_square=__lowerCAmelCase )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" , default_to_square=__lowerCAmelCase )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
UpperCamelCase__ = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
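# The class above mirrors CLIP's image processor; the released one can be exercised
# directly to see the resize -> center-crop -> rescale -> normalize chain end to end.
import numpy as np
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 image
batch = image_processor(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])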
| 619 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            # project the LayerNorm-ed second-to-last hidden state instead of the final one
            sequence_output = self.pre_LN(outputs["hidden_states"][-2])
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 619 | 1 |
"""simple docstring"""
def _snake_case ( _snake_case : str ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case_:
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[int] = num_choices
lowerCAmelCase : Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : int = None
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
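
# The cache-consistency check above compares a random channel of the last hidden states
# from a full forward pass against an incremental pass that reuses `past_key_values`.
# A minimal sketch of that comparison pattern, with toy tensors standing in for model
# outputs (shapes follow the tester's defaults; no model is actually run):
import torch as _torch

_torch.manual_seed(0)
_output_from_no_past = _torch.randn(13, 10, 32)         # full pass over 7 + 3 tokens
_output_from_past = _output_from_no_past[:, -3:, :]     # cached pass over the last 3 tokens
_random_slice_idx = _torch.randint(0, 32, (1,)).item()
assert _torch.allclose(
    _output_from_no_past[:, -3:, _random_slice_idx],
    _output_from_past[:, :, _random_slice_idx],
    atol=1e-3,
)
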
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
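
# For reference, a minimal sketch of what the "linear" scaling branch does to the
# rotary angles (an illustrative reimplementation, not the transformers code):
# positions are divided by the factor before the angles are computed, so the scaled
# angles are exactly the unscaled ones divided by the factor.
import torch as _torch

def _rope_angles(positions, dim=8, base=10000.0, scaling_factor=1.0):
    positions = positions.float() / scaling_factor
    inv_freq = 1.0 / (base ** (_torch.arange(0, dim, 2).float() / dim))
    return _torch.outer(positions, inv_freq)

_angles = _rope_angles(_torch.arange(10), scaling_factor=10.0)
assert _torch.allclose(_angles * 10.0, _rope_angles(_torch.arange(10)))
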
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's frame "
            "of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known "
            'as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
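For context, here is a minimal sketch of the two input-fabrication helpers the tester above imports from `test_modeling_common` (the behavior is assumed and reimplemented for illustration; it is not the transformers source).

import torch

def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size).
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    # Random 0/1 mask; keep the first token attended so no row is all zeros.
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, 0] = 1
    return mask

print(ids_tensor([2, 5], vocab_size=99).shape)  # torch.Size([2, 5])
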
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
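Usage sketch for the nearest-neighbour helpers above (assumes `similarity_search` and `cosine_similarity` are defined or imported in the current scope):

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.9, 1.1]])
print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 0.1414...]] -> nearest vector and its distance
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # 0.7071...
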
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset of random length that yields increasing integers.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, encoder_attention_mask, decoder_attention_mask, None
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
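The core of the conversion script above is a state-dict copy plus an output-equivalence check. A toy version of that pattern, with two linear layers standing in for the original and converted summarizers:

import torch
import torch.nn as nn

torch.manual_seed(0)
original = nn.Linear(4, 4)
converted = nn.Linear(4, 4)
converted.load_state_dict(original.state_dict())  # the "conversion"

x = torch.randn(2, 4)
max_abs_diff = torch.max(torch.abs(original(x) - converted(x))).item()
assert torch.allclose(original(x), converted(x), atol=1e-3), max_abs_diff
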
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FILE = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
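The "server down" test above hinges on patching the HTTP layer. A self-contained sketch of that mocking pattern (requires the `requests` package; `fetch` is an illustrative helper, not transformers code):

import unittest.mock as mock

import requests

def fetch():
    return requests.Session().request("HEAD", "https://example.com").status_code

with mock.patch("requests.Session.request") as mocked:
    mocked.return_value = mock.Mock(status_code=500)
    assert fetch() == 500
    mocked.assert_called()
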
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
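The `__call__` above is essentially a dispatcher: text goes to the tokenizer, images to the image processor, and the two encodings are merged. A stripped-down sketch of that control flow with stub callables (illustrative only, not the transformers implementation):

def process(images=None, text=None, tokenize=None, extract=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = tokenize(text) if text is not None else None
    if images is not None:
        image_features = extract(images)
        if encoding is not None:
            encoding.update(image_features)
            return encoding
        return image_features
    return encoding

out = process(images=["img"], text="hi", tokenize=lambda t: {"input_ids": [101]}, extract=lambda i: {"pixel_values": [0.0]})
assert out == {"input_ids": [101], "pixel_values": [0.0]}
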
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_large_model_tf(self):
        pass
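

# Hedged usage sketch (illustrative addition): how the pipeline exercised above is
# used outside the test harness. Assumes network access to download the tiny
# checkpoint already referenced in test_small_model_pt; defined but never called.
def _example_audio_classification():
    classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    audio = np.ones((8_000,))  # one second of a constant signal at 8 kHz
    return classifier(audio, top_k=4)  # -> list of {"score": float, "label": str}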
| 183 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
# class and method names reconstructed (an abstract CLI-command base in the style
# of transformers' BaseTransformersCLICommand); the base class must be ABC for the
# @abstractmethod decorators below to take effect
class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
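

# Hedged example (illustrative addition): a minimal concrete subcommand built on the
# abstract base above. `EchoCommand` and its arguments are hypothetical; the two
# overridden methods are the entire contract the base class imposes. `parser` is
# assumed to be the sub-parsers action of a root ArgumentParser.
class EchoCommand(BaseCommand):
    def __init__(self, message):
        self._message = message

    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="Print a message and exit.")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def run(self):
        print(self._message)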
| 183 | 1 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    """Merge a LoRA .safetensors checkpoint into a diffusers StableDiffusionPipeline."""
    # function and variable names reconstructed from the call site and in-body references
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
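
# Hedged usage note (illustrative addition): an example invocation of this conversion
# script. The script filename and paths are placeholders; the flags match the
# argparse definitions above.
#
#   python convert_lora_safetensors_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./converted_pipeline \
#       --alpha 0.75 --device cpu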
| 27 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    # function and variable names reconstructed from the call sites below
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
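

# Hedged usage note (illustrative addition): an example invocation, assuming the
# parser above is registered as the `tpu-config` subcommand of the `accelerate` CLI.
#
#   accelerate tpu-config \
#       --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug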
| 27 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    # class, attribute, and parameter names reconstructed from the assignments below
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        ) | 403 |
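
# Hedged usage sketch (illustrative addition): instantiating the config above and
# reading attributes through `attribute_map`. Uses only names defined in this file;
# defined but never called.
def _example_xglm_config():
    config = XGLMConfig(num_layers=4, attention_heads=8, d_model=256)
    # the mapped aliases resolve to the underlying fields set in __init__
    return config.num_hidden_layers, config.num_attention_heads, config.hidden_size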
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    # function and variable names reconstructed from the call sites below
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main() | 403 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    # class, attribute, and method names reconstructed; the base class is the
    # SchedulerCommonTest imported above
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps) | 30 |
'''simple docstring'''
from typing import Any
class Node:
    # class and attribute names reconstructed from the methods that read them
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 325 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    # function name reconstructed from the call sites in the tests below
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
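

# Hedged usage sketch (illustrative addition): deduplicating an arbitrary dataset
# with the helpers under test. Assumes `deduplicate_dataset` returns the filtered
# dataset plus the duplicate clusters, mirroring the unpacking in the test above;
# defined but never called.
def _example_deduplicate():
    ds = get_dataset()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds)
    return len(ds_filter), len(duplicate_clusters)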
| 702 |
"""simple docstring"""
class Node:
    # class and method names reconstructed from the demo code below
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | 0 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    # function and variable names reconstructed from the in-body references
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception):
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
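

# Hedged usage sketch (illustrative addition): the decorator above calls the wrapped
# function with the current batch size as its first argument and halves it whenever
# an out-of-memory error is detected, so the wrapped function must accept the batch
# size first. Defined but never called.
def _example_training_entry_point():
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        # ... build dataloaders with `batch_size` and run the training loop ...
        return batch_size

    return train()  # no explicit batch size: the decorator supplies and adapts it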
| 451 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    # class and parameter names reconstructed from the assignments below
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
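

# Hedged usage sketch (illustrative addition): instantiating the config above and
# inspecting the ONNX input axes. Uses only names defined in this file; defined but
# never called.
def _example_camembert_onnx_inputs():
    config = CamembertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    onnx_config = CamembertOnnxConfig(config, task="multiple-choice")
    return dict(onnx_config.inputs)  # both inputs get the 3-axis layout picked above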
| 451 | 1 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 709 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
# function and variable names reconstructed; the pytest fixtures (`tmp_path_factory`,
# `tmp_path`) follow from the in-body references
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 454 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    # class, parameter, and variable names reconstructed from the attribute reads below
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
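

# Hedged usage sketch (illustrative addition): the minimal input format the
# integration test above exercises — token ids plus one (x0, y0, x1, y1) box per
# token. Assumes the checkpoint can be downloaded; defined but never called.
def _example_lilt_forward():
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox)
    return outputs.last_hidden_state.shape  # torch.Size([1, 2, 768])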
| 568 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    # class and method names reconstructed to follow the unittest conventions
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
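

# Hedged usage sketch (illustrative addition): calling the tool outside the test
# harness. Assumes the checkpoint behind the "text-to-speech" tool is downloadable;
# defined but never called.
def _example_text_to_speech():
    tts = load_tool("text-to-speech")
    tts.setup()
    result = tts("hello world")  # an audio wrapper around a 1-D waveform
    return result.to_raw().shape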
| 475 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
A_ = KandinskyVaaInpaintPipeline
A_ = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
A_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
A_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
A_ = False
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
return 100
@property
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : int = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowercase : List[Any] = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Any = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {"unet": unet, "scheduler": scheduler, "movq": movq}
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 701 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    # swallow the "-f" argument that pytest/ipykernel may pass along
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setUp(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
| 12 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
"""simple docstring"""
def __init__( self , snake_case__ = True ):
"""simple docstring"""
lowerCAmelCase : dict[T, list[T]] = {} # dictionary of lists
lowerCAmelCase : str = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
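

# A minimal usage sketch (not part of the original module): it exercises add_edge
# on both graph flavors; since add_edge returns self, calls can be chained.
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList[int]()
    directed_graph.add_edge(0, 1).add_edge(1, 2)
    print(directed_graph)  # {0: [1], 1: [2], 2: []}

    undirected_graph = GraphAdjacencyList[str](directed=False)
    undirected_graph.add_edge("a", "b")
    print(undirected_graph)  # {'a': ['b'], 'b': ['a']}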
| 645 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
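

# A small illustrative sketch (not part of the original file): it mimics the
# fairseq-offset bookkeeping above with plain dictionaries and a toy, hypothetical
# spm vocab, to show how piece ids are shifted past the 15 reserved slots.
if __name__ == "__main__":
    fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
    for i in range(10):
        fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i
    fairseq_offset = 12  # spm id 3 (the first "real" piece) maps to embedding id 15

    toy_spm = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}  # hypothetical pieces

    def convert_token_to_id(token: str) -> int:
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = toy_spm.get(token, 0)
        return spm_id + fairseq_offset if spm_id else 3  # 3 == [UNK]

    print(convert_token_to_id("[SEP]"))  # 2: reserved slot, no offset applied
    print(convert_token_to_id(","))  # 15: spm id 3 + offset 12
    print(convert_token_to_id("xyz"))  # 3: unknown piece falls back to [UNK]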
| 502 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 502 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
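

# A hypothetical invocation of this script (flag names come from the dataclasses
# defined above; the model name and data paths are placeholders, not part of the
# original file):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file ./data/train.txt \
#       --eval_data_file ./data/valid.txt \
#       --do_train --do_eval \
#       --output_dir ./lm-output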
| 364 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
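

# A quick illustrative example (not part of the original script): subsampling a
# synthetic 5 s waveform to at most 2 s at 16 kHz returns a 32000-sample slice.
#
#   dummy_wav = np.zeros(5 * 16000, dtype=np.float32)
#   clip = random_subsample(dummy_wav, max_length=2.0, sample_rate=16000)
#   assert len(clip) == 2 * 16000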
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' ,lowerCAmelCase__ ,lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : List[str] = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCamelCase_ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
lowerCamelCase_ : Optional[int] = DatasetDict()
lowerCamelCase_ : Dict = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCamelCase_ : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--label_column_name` to the correct text column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCamelCase_ : Dict = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowerCamelCase_ : Optional[Any] = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowerCamelCase_ : Optional[int] = feature_extractor.model_input_names[0]
def train_transforms(lowerCAmelCase__ ):
lowerCamelCase_ : Optional[int] = []
for audio in batch[data_args.audio_column_name]:
lowerCamelCase_ : Union[str, Any] = random_subsample(
audio['array'] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCAmelCase__ )
lowerCamelCase_ : int = feature_extractor(lowerCAmelCase__ ,sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase_ : Optional[Any] = {model_input_name: inputs.get(lowerCAmelCase__ )}
lowerCamelCase_ : Any = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCAmelCase__ ):
lowerCamelCase_ : Dict = [audio['array'] for audio in batch[data_args.audio_column_name]]
lowerCamelCase_ : Optional[Any] = feature_extractor(lowerCAmelCase__ ,sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase_ : Optional[int] = {model_input_name: inputs.get(lowerCAmelCase__ )}
lowerCamelCase_ : Tuple = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCamelCase_ : Optional[int] = raw_datasets['train'].features[data_args.label_column_name].names
lowerCamelCase_ , lowerCamelCase_ : Optional[int] = {}, {}
for i, label in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ : List[Any] = str(lowerCAmelCase__ )
lowerCamelCase_ : Union[str, Any] = label
# Load the accuracy metric from the datasets package
lowerCamelCase_ : Tuple = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ : Tuple = np.argmax(eval_pred.predictions ,axis=1 )
return metric.compute(predictions=lowerCAmelCase__ ,references=eval_pred.label_ids )
lowerCamelCase_ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(lowerCAmelCase__ ) ,labelaid=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,finetuning_task='audio-classification' ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCamelCase_ : Optional[int] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
    if data_args.max_train_samples is not None:
        raw_datasets['train'] = (
            raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        )
    # Set the training transforms
    raw_datasets['train'].set_transform(train_transforms , output_all_columns=False )

if training_args.do_eval:
    if data_args.max_eval_samples is not None:
        raw_datasets['eval'] = (
            raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
        )
    # Set the validation transforms
    raw_datasets['eval'].set_transform(val_transforms , output_all_columns=False )

# Initialize our trainer
trainer = Trainer(
    model=model , args=training_args , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint )
    trainer.save_model()
    trainer.log_metrics('train' , train_result.metrics )
    trainer.save_metrics('train' , train_result.metrics )
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    metrics = trainer.evaluate()
    trainer.log_metrics('eval' , metrics )
    trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
kwargs = {
    'finetuned_from': model_args.model_name_or_path,
    'tasks': 'audio-classification',
    'dataset': data_args.dataset_name,
    'tags': ['audio-classification'],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs )
else:
    trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 364 | 1 |
'''simple docstring'''
import socket
def main() -> None:
    """Connect to a local server, announce ourselves, and stream the received file to disk."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2312

    sock.connect((host, port) )
    sock.send(b'Hello server!' )

    with open('Received_file', 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )

    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
main()
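# Note: this client assumes a matching server is already listening on the same host and port,
# streaming the file's bytes and closing the connection when the transfer is complete.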
| 593 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
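# `delta` encodes the four von Neumann moves as (row, col) offsets; successor cells are
# always generated in this fixed up/left/down/right order.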
class Node:
    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , parent: Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False

    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent: Node ) -> list[Node]:
        """Returns the valid neighbours of `parent`: inside the grid and not an obstacle."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node: Node | None ) -> Path:
        """Unwinds parent pointers from `node` back to the start, then reverses the result."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
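# The bidirectional variant below runs one BFS from the start and one from the goal,
# alternating expansions, and stops as soon as both frontiers pop the same cell.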
class BidirectionalBreadthFirstSearch:
    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False

    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            # Each search aims at the other frontier's most recent node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node: Node , bwd_node: Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a : Any = (0, 0)
a : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a : List[Any] = time.time()
a : Any = BreadthFirstSearch(init, goal)
a : List[Any] = bfs.search()
a : List[Any] = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
a : Optional[Any] = time.time()
a : Tuple = BidirectionalBreadthFirstSearch(init, goal)
a : str = bd_bfs.search()
a : Optional[int] = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 593 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down(self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("""gpt2""" )

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("""gpt2""" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self ):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file , """wb""" ) as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
        try:
            with open("""tokenizer.json""" , """wb""" ) as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , f )
            tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 10_00 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("""tokenizer.json""" )
    def test_legacy_load_from_url(self ):
        _ = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass(cls ):
        try:
            delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
        except HTTPError:
            pass
    def test_push_to_hub(self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
            with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )

        tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

        # Reset repo
        delete_repo(token=self._token , repo_id="""test-tokenizer""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir , repo_id="""test-tokenizer""" , push_to_hub=True , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    def test_push_to_hub_in_organization(self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
            with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )

        tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=True , use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self ):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
            with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )

        # No fast custom tokenizer
        tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
            with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )

        tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
        tokenizer = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''' , use_fast=False , trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class TrieTest(unittest.TestCase ):
    def test_trie(self ):
        trie = Trie()
        trie.add("""Hello 友達""" )
        self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
        trie.add("""Hello""" )
        self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )

    def test_trie_split(self ):
        trie = Trie()
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
        trie.add("""[CLS]""" )
        trie.add("""extra_id_1""" )
        trie.add("""extra_id_100""" )
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )

    def test_trie_single(self ):
        trie = Trie()
        trie.add("""A""" )
        self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
        self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )

    def test_trie_final(self ):
        trie = Trie()
        trie.add("""TOKEN]""" )
        trie.add("""[SPECIAL_TOKEN]""" )
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )

    def test_trie_subtokens(self ):
        trie = Trie()
        trie.add("""A""" )
        trie.add("""P""" )
        trie.add("""[SPECIAL_TOKEN]""" )
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )

    def test_trie_suffix_tokens(self ):
        trie = Trie()
        trie.add("""AB""" )
        trie.add("""B""" )
        trie.add("""C""" )
        self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )

    def test_trie_skip(self ):
        trie = Trie()
        trie.add("""ABC""" )
        trie.add("""B""" )
        trie.add("""CD""" )
        self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )

    def test_cut_text_hardening(self ):
        # Even if the offsets are wrong, `cut_text` must still return valid string parts.
        trie = Trie()
        parts = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ["""AB""", """C"""] )
| 369 |
'''simple docstring'''
def solution(limit: int = 2_81_23 ) -> int:
    """
    Project Euler 23: sum of all positive integers that cannot be written as the
    sum of two abundant numbers (numbers whose proper divisors sum to more than
    the number itself).
    """
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a) in abundants for a in abundants ):
            res += n
    return res
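# With the default limit of 28123 (the proven bound above which every integer is a sum of
# two abundant numbers), solution() returns 4179871.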
if __name__ == "__main__":
print(solution())
| 69 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stub so this module still imports when PIL is unavailable."""

        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase ):
    @require_torch
    def test_small_model_pt(self ):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""a""", """b""", """c"""] )

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
            ] , )

        output = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
            ]
            * 5 , )
    @require_tf
    def test_small_model_tf(self ):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""a""", """b""", """c"""] )
        self.assertEqual(
            nested_simplify(output ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )

        output = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
            ]
            * 5 , )
    @slow
    @require_torch
    def test_large_model_pt(self ):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ] , )

        output = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.511, """label""": """remote"""},
                    {"""score""": 0.485, """label""": """cat"""},
                    {"""score""": 0.004, """label""": """plane"""},
                ],
            ]
            * 5 , )
    @slow
    @require_tf
    def test_large_model_tf(self ):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ] , )

        output = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.511, """label""": """remote"""},
                    {"""score""": 0.485, """label""": """cat"""},
                    {"""score""": 0.004, """label""": """plane"""},
                ],
            ]
            * 5 , )
| 713 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("""csv""" , data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="""max_length""") , batched=True , )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="""max_length""" , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)

@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"} )
    train_file: Optional[str] = field(default=None , metadata={"help": "The path of the training file"} )
    dev_file: Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
    test_file: Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )

@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""")

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        F'''16-bits training: {training_args.fp16}''')
    logger.info(F'''Training/evaluation parameters {training_args}''')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""")
        with open(output_eval_file , """w""") as writer:
            logger.info("""***** Eval results *****""")
            for key, value in result.items():
                logger.info(F''' {key} = {value}''')
                writer.write(F'''{key} = {value}\n''')
            results.update(result)
    return results
| 83 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs' )

    @classmethod
    def setUpClass(cls ):
        # Rename any pre-existing user config out of the way so these tests start clean.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def tearDownClass(cls ):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def test_no_config(self ):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def test_config_compatibility(self ):
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config ), self.test_file_path] , env=os.environ.copy() )

    def test_accelerate_test(self ):
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class TpuConfigTester(unittest.TestCase ):
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
    def test_base(self ):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )

    def test_base_backward_compatibility(self ):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )

    def test_with_config_file(self ):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=True )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )

    def test_with_config_file_and_command(self ):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )

    def test_with_config_file_and_multiple_command(self ):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , output , )

    def test_with_config_file_and_command_file(self ):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )

    def test_with_config_file_and_command_file_backward_compatibility(self ):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )

    def test_accelerate_install(self ):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , output , )

    def test_accelerate_install_version(self ):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , output , )
| 579 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
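# `_LazyModule` stands in for this module in `sys.modules`, so the sentencepiece-backed
# tokenizer is only imported the first time `GPTSw3Tokenizer` is actually accessed.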
| 579 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 716 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
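# Example invocation (illustrative; assumes this script is saved as convert_vae_to_onnx.py):
#   python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14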
| 523 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    """
    Doolittle's method: factor a square matrix `table` into a unit lower-triangular
    matrix and an upper-triangular matrix such that lower @ upper == table.
    """
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )

    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        # solve for the entries of the i-th row of `lower` left of the diagonal...
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # ...then the entries of the i-th row of `upper` from the diagonal rightwards
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
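# Illustrative check (not part of the original script): for
#   table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
# lower, upper = lower_upper_decomposition(table) satisfies np.allclose(lower @ upper, table).
# Note the routine does not pivot, so a zero pivot raises ArithmeticError.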
| 173 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
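# Module-level RNG shared by `floats_list` below, so the synthetic waveform inputs used in
# these tests are drawn from one generator (and can be overridden per call via `rng`).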
global_rng = random.Random()

def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32-like nested list of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict( self ):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp( self ):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )

    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )

    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='max_length' , return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )

        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_double_precision_pad( self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
                0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
                0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
                -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
            ] )
        # fmt: on

        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
| 353 | 0 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module , tensor_name , device , value=None , fp16_statistics=None ):
    """
    Sets a tensor (parameter or buffer) of `module` on `device`, recursing into submodules and
    handling the bitsandbytes quantized parameter classes.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(""".""" )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F'''{module} has no attribute {split}.''' )
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )

    if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to("""cpu""" )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
                        """0.37.2""" )
                    if not is_8bit_serializable:
                        raise ValueError(
                            """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
                            """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
            else:
                new_value = torch.tensor(value , device="""cpu""" )

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , """SCB""" , fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    """Recursively swaps eligible `nn.Linear`/`Conv1D` modules for bitsandbytes quantized layers."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )

        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in """.""".join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    modules_to_not_convert = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Utility that returns the keys of the modules to keep in full precision
    (e.g. the `lm_head` of a CausalLM, for numerical stability).
    """
    # Create a copy of the model and tie the weights, then check for tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
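# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): the recursion in
# `_replace_with_bnb_linear` above swaps `nn.Linear` children in place. The
# toy below reproduces that swap pattern with a plain `nn.Linear` subclass
# standing in for `bnb.nn.Linear8bitLt`, so it runs without bitsandbytes or a
# GPU. `MarkerLinear` and `swap_linears` are made-up names for this sketch.
# ---------------------------------------------------------------------------
import torch.nn as nn


class MarkerLinear(nn.Linear):
    """Stand-in for a quantized linear layer in this sketch."""


def swap_linears(model: nn.Module) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Linear):
            # replace the child in place, as the recursion above does
            model._modules[name] = MarkerLinear(
                child.in_features, child.out_features, bias=child.bias is not None )
        else:
            swap_linears(child)  # recurse into composite modules
    return model


if __name__ == "__main__":
    toy = nn.Sequential(nn.Linear(8, 8), nn.Sequential(nn.Linear(8, 2)))
    toy = swap_linears(toy)
    assert isinstance(toy[0], MarkerLinear) and isinstance(toy[1][0], MarkerLinear)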
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
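# For reference, with the default task the `inputs` property above resolves to
#     OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. the batch and sequence dimensions are declared dynamic for ONNX export.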
| 378 | 0 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume between the supported units via cubic metres."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
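    # Worked examples for the converter above: 4 m^3 -> litres multiplies by
    # 1 (to cubic metres) and then 1000 (to litres).
    assert volume_conversion(4, "cubicmeter", "litre") == 4000
    assert volume_conversion(1, "litre", "kilolitre") == 0.001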
| 459 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method: the encoded latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE model (encoder, vector quantizer, decoder) for decoding latent representations."""

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group", ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
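# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): the core of the
# `VectorQuantizer` used above is a nearest-neighbour lookup into a learned
# codebook. The lines below show that lookup in isolation; the real layer also
# applies a commitment loss and a straight-through estimator.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    codebook = torch.randn(256, 4)         # 256 codebook entries of dimension 4
    z = torch.randn(10, 4)                 # ten flattened latent vectors
    distances = torch.cdist(z, codebook)   # pairwise L2 distances
    indices = distances.argmin(dim=1)      # index of the closest code per latent
    z_q = codebook[indices]                # quantized latents
    assert z_q.shape == z.shape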
| 392 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs." )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs." )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids | 191 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids) | 191 | 1 |
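# ---------------------------------------------------------------------------
# Illustrative sketch for the tokenization test above (not part of either
# upstream module): the greedy longest-match-first WordPiece step that turns
# "unwanted" into ["un", "##want", "##ed"]. The real BertTokenizer also does
# whitespace/punctuation splitting and accent stripping first.
# ---------------------------------------------------------------------------
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]  # no sub-piece matched at this position
        start = end
    return pieces


assert _wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert _wordpiece_sketch("running", {"runn", "##ing"}) == ["runn", "##ing"]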
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 7 | '''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True, ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def snake_case__ ( self : str ) -> Optional[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def snake_case__ ( self : str ) -> List[Any]:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def snake_case__ ( self : str ) -> int:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def snake_case__ ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def snake_case__ ( self : str ) -> Dict:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def snake_case__ ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def snake_case__ ( self : str ) -> List[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def snake_case__ ( self : Tuple ) -> int:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def snake_case__ ( self : List[str] ) -> Dict:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def snake_case__ ( self : int ) -> int:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : int ) -> int:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_feature_maps_and_channels(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 262 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Any=0.2 , lowercase_ : int=0.2 ):
lowercase_ : Optional[int] = bp_numa
lowercase_ : int = bp_numa
lowercase_ : Optional[int] = bp_numa
lowercase_ : List[str] = conva_get[:2]
lowercase_ : str = conva_get[2]
lowercase_ : Tuple = size_pa
lowercase_ : Union[str, Any] = rate_w
lowercase_ : Optional[Any] = rate_t
lowercase_ : str = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase_ : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] ):
# save model dict with pickle
lowercase_ : int = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowercase_ , """wb""" ) as f:
pickle.dump(lowercase_ , lowercase_ )
print(f'''Model saved: {save_path}''' )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , lowercase_ : List[Any] ):
# read saved model
with open(lowercase_ , """rb""" ) as f:
lowercase_ : List[Any] = pickle.load(lowercase_ ) # noqa: S301
lowercase_ : Any = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
lowercase_ : Tuple = model_dic.get("""size_pooling1""" )
lowercase_ : Tuple = model_dic.get("""num_bp1""" )
lowercase_ : Dict = model_dic.get("""num_bp2""" )
lowercase_ : Tuple = model_dic.get("""num_bp3""" )
lowercase_ : List[str] = model_dic.get("""rate_weight""" )
lowercase_ : Optional[int] = model_dic.get("""rate_thre""" )
# create model instance
lowercase_ : List[str] = CNN(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# modify model parameter
lowercase_ : int = model_dic.get("""w_conv1""" )
lowercase_ : str = model_dic.get("""wkj""" )
lowercase_ : List[Any] = model_dic.get("""vji""" )
lowercase_ : Union[str, Any] = model_dic.get("""thre_conv1""" )
lowercase_ : Union[str, Any] = model_dic.get("""thre_bp2""" )
lowercase_ : List[str] = model_dic.get("""thre_bp3""" )
return conv_ins
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
return 1 / (1 + np.exp(-1 * x ))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] ):
return round(lowercase_ , 3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Dict ):
# convolution process
lowercase_ : List[str] = convs[0]
lowercase_ : Tuple = convs[1]
lowercase_ : Tuple = np.shape(lowercase_ )[0]
# get the data slice of original image data, data_focus
lowercase_ : Dict = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase_ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase_ ):
lowercase_ : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase_ : List[Any] = []
lowercase_ : int = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase_ ):
lowercase_ : Any = []
for i_focus in range(len(lowercase_ ) ):
lowercase_ : List[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase_ ) )
lowercase_ : Optional[int] = np.asmatrix(lowercase_ ).reshape(
lowercase_ , lowercase_ )
data_featuremap.append(lowercase_ )
# expanding the data slice to One dimenssion
lowercase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase_ ) )
lowercase_ : Tuple = np.asarray(lowercase_ )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Tuple="average_pool" ):
# pooling process
lowercase_ : int = len(featuremaps[0] )
lowercase_ : Optional[int] = int(size_map / size_pooling )
lowercase_ : Dict = []
for i_map in range(len(lowercase_ ) ):
lowercase_ : Any = featuremaps[i_map]
lowercase_ : List[Any] = []
for i_focus in range(0 , lowercase_ , lowercase_ ):
for j_focus in range(0 , lowercase_ , lowercase_ ):
lowercase_ : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase_ ) )
lowercase_ : int = np.asmatrix(lowercase_ ).reshape(lowercase_ , lowercase_ )
featuremap_pooled.append(lowercase_ )
return featuremap_pooled
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[Any] ):
# expanding three dimension data to one dimension list
lowercase_ : Optional[int] = []
for i in range(len(lowercase_ ) ):
lowercase_ : List[str] = np.shape(data[i] )
lowercase_ : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase_ : int = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase_ )
lowercase_ : Union[str, Any] = np.asarray(lowercase_ )
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Dict ):
# expanding matrix to one dimension list
lowercase_ : Optional[int] = np.asarray(lowercase_ )
lowercase_ : Optional[Any] = np.shape(lowercase_ )
lowercase_ : Tuple = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] ):
lowercase_ : Tuple = []
lowercase_ : List[str] = 0
for i_map in range(lowercase_ ):
lowercase_ : Any = np.ones((size_map, size_map) )
for i in range(0 , lowercase_ , lowercase_ ):
for j in range(0 , lowercase_ , lowercase_ ):
lowercase_ : List[str] = pd_pool[
i_pool
]
lowercase_ : Tuple = i_pool + 1
lowercase_ : Dict = np.multiply(
lowercase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase_ )
return pd_all
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Any=bool ):
# model traning
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowercase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowercase_ )) )
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = []
lowercase_ : str = 10000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : Any = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowercase_ ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : Any = np.asmatrix(datas_train[p] )
lowercase_ : Optional[int] = np.asarray(datas_teach[p] )
lowercase_ : List[Any] = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase_ : List[Any] = self.pooling(lowercase_ , self.size_poolinga )
lowercase_ : List[str] = np.shape(lowercase_ )
lowercase_ : Dict = self._expand(lowercase_ )
lowercase_ : Tuple = data_bp_input
lowercase_ : Union[str, Any] = np.dot(lowercase_ , self.vji.T ) - self.thre_bpa
lowercase_ : Tuple = self.sig(lowercase_ )
lowercase_ : Tuple = np.dot(lowercase_ , self.wkj.T ) - self.thre_bpa
lowercase_ : Tuple = self.sig(lowercase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase_ : int = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase_ , (1 - bp_outa) ) )
lowercase_ : str = np.multiply(
np.dot(lowercase_ , self.wkj ) , np.multiply(lowercase_ , (1 - bp_outa) ) )
lowercase_ : List[Any] = np.dot(lowercase_ , self.vji )
lowercase_ : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Tuple = pd_conva_pooled.T.getA().tolist()
lowercase_ : Any = self._calculate_gradient_from_pool(
lowercase_ , lowercase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Dict = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Union[str, Any] = self.rate_weight * np.dot(lowercase_ , lowercase_ )
lowercase_ : str = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase_ : Any = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : Dict = rp + 1
lowercase_ : Any = error_count / patterns
all_mse.append(lowercase_ )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase_ , """+-""" )
plt.plot(lowercase_ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowercase_ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[str] ):
# model predict
lowercase_ : List[Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowercase_ )) )
for p in range(len(lowercase_ ) ):
lowercase_ : Any = np.asmatrix(datas_test[p] )
lowercase_ : Any = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase_ : List[str] = self.pooling(lowercase_ , self.size_poolinga )
lowercase_ : List[Any] = self._expand(lowercase_ )
lowercase_ : str = data_bp_input
lowercase_ : int = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : Union[str, Any] = self.sig(lowercase_ )
lowercase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Any = self.sig(lowercase_ )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[Any] = [list(map(self.do_round , lowercase_ ) ) for each in produce_out]
return np.asarray(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
# return the data of image after convoluting process so we can check it out
lowercase_ : str = np.asmatrix(lowercase_ )
lowercase_ : Optional[int] = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase_ : Dict = self.pooling(lowercase_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
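    # Illustrative sketch (not part of the upstream module): the average
    # pooling performed by the network's pooling routine above, done with a
    # reshape. Each non-overlapping 2x2 block of a 4x4 map collapses to its mean.
    fmap = np.arange(16, dtype=float).reshape(4, 4)
    pooled = fmap.reshape(2, 2, 2, 2).mean(axis=(1, 3))
    assert pooled.shape == (2, 2)
    assert pooled[0, 0] == fmap[:2, :2].mean()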
| 708 | '''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 30 | 0 |
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 224 |
"""simple docstring"""
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 224 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 314 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10**k, when the terms are written as a(i) = b * 10**k + c.
    """
    # ds_b - digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """
    Same as next_term(a_i, k, i, n) but computes terms sequentially, without memoizing.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """
    Adds `addend` to the digit array `digits`, starting at index k.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence a(1) = 1; a(i+1) = a(i) + digitsum(a(i)).
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 622 |
def solution(limit: int = 50_000_000) -> int:
    """
    Counts the numbers below `limit` expressible as p**2 + q**3 + r**4
    with p, q and r prime (Project Euler style prime power triples).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
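    # Worked check: the smallest expressible number is 28 = 2**2 + 2**3 + 2**4,
    # so with a limit of 29 exactly one number qualifies.
    assert solution(29) == 1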
| 622 | 1 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy of a non-rotating body: 1/2 * m * v**2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
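    # Worked example: a 10 kg mass moving at 10 m/s carries
    # 0.5 * 10 * 10 * 10 = 500 J of kinetic energy.
    assert kinetic_energy(10, 10) == 500.0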
| 168 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge**2."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge**3."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
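    # Worked examples: for a unit edge the closed forms above give
    # 3 * sqrt(25 + 10*sqrt(5)) ~= 20.6457 and (15 + 7*sqrt(5)) / 4 ~= 7.6631.
    assert round(dodecahedron_surface_area(1), 4) == 20.6457
    assert round(dodecahedron_volume(1), 4) == 7.6631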
| 168 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def a ( self : Any ) -> List[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
__snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case = pipe('anime turle' , generator=SCREAMING_SNAKE_CASE_ , output_type='np' )
__snake_case = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 56 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}
class ConditionalDetrConfig( PretrainedConfig ):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function='relu' , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type='sine' , backbone='resnet50' , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-5
    @property
    def default_onnx_opset( self ):
        return 12
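# Illustrative usage sketch (not part of the original module): `attribute_map`
# lets callers read the generic names even though values are stored under the
# DETR-style fields.
#
#   config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
#   assert config.hidden_size == 256 and config.num_attention_heads == 8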
| 300 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
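# These tests cover datasets' download/cache utilities: extraction of
# compressed files through cached_path, resolution of local and tmp:// paths,
# and the errors raised by the http/ftp/fsspec helpers when
# HF_DATASETS_OFFLINE is enabled.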
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = 'file'
@pytest.fixture(scope='session' )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , 'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    # absolute path
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file
def test_cached_path_missing_local( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    output_file = get_from_cache(f"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_http_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_ftp_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_fsspec_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
| 485 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
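# MAPPING translates fairseq parameter names into their HF counterparts;
# entries containing "*" are filled in with the encoder layer index at load
# time, and names in TOP_LEVEL_KEYS live outside the `unispeech_sat.`
# submodule prefix (e.g. the LM head and quantizer projections).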
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config )
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 485 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class __UpperCamelCase ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
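# Worked example of the padding rule above (illustrative): with pad_size=8,
# a 17x15 image is grown to the next multiple-of-8 grid:
#   pad_height = (17 // 8 + 1) * 8 - 17 = 7   -> height 24
#   pad_width  = (15 // 8 + 1) * 8 - 15 = 1   -> width 16
# Note the formula always pads: a dimension already a multiple of 8 (e.g. 16)
# is still grown by a full block, to 24.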
| 80 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PIL.Image.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs)
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs)
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
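# The preprocess method above applies, in order: resize -> center_crop ->
# rescale -> normalize, then converts each image to the requested channel
# layout before wrapping the batch in a BatchFeature.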
| 503 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __lowercase (unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible( self ):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible( self ):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible( self ):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant( self ):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant))
    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant))
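# The rule being tested: is_safetensors_compatible passes only when every
# .bin weight file has a matching .safetensors counterpart (per component,
# and per variant such as fp16); each negative case above drops exactly one
# safetensors file from an otherwise complete listing.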
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset')) , 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head')
    if "scratch" in name:
        name = name.replace('scratch' , 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt')
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm')
    if "head" in name:
        name = name.replace('head' , 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head')
    return name
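# Note the refinenet renaming above runs after 'scratch' -> 'neck', so the
# prefix being sliced is already 'neck.refinenetN'; abs(layer_idx - 4) flips
# the order: refinenet4 -> fusion_stage.layers.0, ..., refinenet1 ->
# fusion_stage.layers.3, matching the inline comment in the code.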
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
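# The fused timm qkv matrix has shape (3 * hidden_size, hidden_size); the
# slices above peel off the first, middle, and last hidden_size rows as the
# separate query, key, and value projections expected by the HF layout.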
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 1 |
from itertools import product
def total_frequency_distribution( sides_number: int , dice_number: int ):
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution():
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
    print(f"""{solution() = }""")
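# Sanity check (illustrative): Peter rolls nine 4-sided dice (totals 9..36),
# Colin rolls six 6-sided dice (totals 6..36), and Peter wins on a strictly
# greater total; the known answer to Project Euler 205 is 0.5731441.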
| 63 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a_ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        return 8
    @property
    def dummy_tokenizer( self ):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
    @property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            'a shark' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
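# The fast tests build every Shap-E component from tiny randomly initialized
# configs (32-dim embeddings, a single-layer prior) so the text-to-3D path
# runs quickly on CPU; only the @slow test above loads the real
# 'openai/shap-e' weights.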
| 205 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
    ),
}
class Pix2StructTextConfig( PretrainedConfig ):
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class Pix2StructVisionConfig( PretrainedConfig ):
    model_type = 'pix2struct_vision_model'
    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class Pix2StructConfig( PretrainedConfig ):
    model_type = 'pix2struct'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        self.text_config = Pix2StructTextConfig(**text_config )
        self.vision_config = Pix2StructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs( cls , text_config: Pix2StructTextConfig , vision_config: Pix2StructVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
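# Illustrative composition sketch (not part of the original module): the
# top-level config nests the two sub-configs and mirrors generation-critical
# token ids from the text side.
#
#   config = Pix2StructConfig(text_config={'num_layers': 2},
#                             vision_config={'num_hidden_layers': 2})
#   assert config.decoder_start_token_id == config.text_config.decoder_start_token_id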
| 701 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = False ) -> int:
"""simple docstring"""
A : Any = scheduler
A : Tuple = optimizers if isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) else [optimizers]
A : Dict = split_batches
A : Tuple = step_with_optimizer
A : Any = GradientState()
def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
            # Split batches -> the training dataloader batch size is unchanged, so take one scheduler step per training step
self.scheduler.step(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
A : str = AcceleratorState().num_processes
for _ in range(SCREAMING_SNAKE_CASE ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
self.scheduler.step(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.scheduler.state_dict()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
self.scheduler.load_state_dict(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
return self.scheduler.get_lr()
def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return self.scheduler.print_lr(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
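    # --- Editor's note (illustrative usage sketch) ---
    # This wrapper is normally produced by `Accelerator.prepare`, which makes
    # `scheduler.step()` a safe no-op on gradient-accumulation sub-steps where
    # the optimizer did not actually step. A minimal sketch, assuming the
    # public `accelerate` API:
    #
    #   from accelerate import Accelerator
    #   accelerator = Accelerator(gradient_accumulation_steps=4)
    #   model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    #   for batch in dataloader:
    #       with accelerator.accumulate(model):
    #           accelerator.backward(model(**batch).loss)
    #           optimizer.step()
    #           scheduler.step()  # only advances when the optimizer truly stepped
    #           optimizer.zero_grad()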
| 343 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _A( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> tuple[int | None, int | None, float]:
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowercase , __lowercase , __lowercase = max_subarray(UpperCamelCase__ , mid + 1 , UpperCamelCase__ )
__lowercase , __lowercase , __lowercase = max_cross_sum(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
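# --- Editor's note (worked example) ---
# On the classic CLRS input the divide-and-conquer recursion above returns the
# subarray arr[7:11] = [18, 20, -7, 12] with sum 43:
#
#   >>> arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
#   >>> max_subarray(arr, 0, len(arr) - 1)
#   (7, 10, 43)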
def _A( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> tuple[int, int, float]:
'''simple docstring'''
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(UpperCamelCase__ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _A( UpperCamelCase__ : int ) -> float:
'''simple docstring'''
__lowercase = [randint(1 , UpperCamelCase__ ) for _ in range(UpperCamelCase__ )]
__lowercase = time.time()
max_subarray(UpperCamelCase__ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _A( ) -> None:
'''simple docstring'''
__lowercase = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
__lowercase = [time_max_subarray(UpperCamelCase__ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(UpperCamelCase__ , UpperCamelCase__ ):
print(UpperCamelCase__ , '''\t\t''' , UpperCamelCase__ )
plt.plot(UpperCamelCase__ , UpperCamelCase__ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 332 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"camembert-base": 512,
}
UpperCAmelCase__ = "▁"
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['input_ids', 'attention_mask']
UpperCamelCase_ : Any = CamembertTokenizer
def __init__( self : Dict , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : List[Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Optional[Any]="<unk>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : Optional[int]="<mask>" , lowerCamelCase__ : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCamelCase__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
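    # --- Editor's note (illustrative sketch) ---
    # CamemBERT follows RoBERTa's pair format: `<s> A </s></s> B </s>` with the
    # doubled separator, and all-zero token type ids, as implemented by the two
    # methods above. Assuming the tokenizer files can be fetched from the Hub:
    #
    #   from transformers import CamembertTokenizerFast
    #   tok = CamembertTokenizerFast.from_pretrained("camembert-base")
    #   tok.build_inputs_with_special_tokens([10, 11], [12])
    #   # -> [<s>, 10, 11, </s>, </s>, 12, </s>] as ids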
| 332 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def A_ ( A__ ) -> Optional[Any]:
if "model" in orig_key:
a__ : Optional[int] = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
a__ : str = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
a__ : Optional[Any] = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
a__ : Any = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
a__ : Tuple = orig_key.split('.' )[0].split('_' )[-1]
a__ : int = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
a__ : Tuple = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
a__ : List[Any] = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
a__ : Optional[int] = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
a__ : List[Any] = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
a__ : Dict = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
a__ : str = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
a__ : Dict = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
a__ : int = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
a__ : Any = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
a__ : Any = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
a__ : List[Any] = 'yoso.' + orig_key
return orig_key
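# --- Editor's note (worked example) ---
# Tracing the renaming helper above on one hypothetical checkpoint key:
#
#   "model.transformer_0.mha.W_q.weight"
#       -> "yoso.encoder.layer.0.attention.self.query.weight"
#
# i.e. strip "model.", map "transformer_0" to "encoder.layer.0", "mha" to
# "attention", and "W_q" to "self.query", then prefix with "yoso.".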
def A_ ( A__ , A__ ) -> str:
for key in orig_state_dict.copy().keys():
a__ : Tuple = orig_state_dict.pop(A__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
a__ : Optional[int] = val
a__ : Any = orig_state_dict['cls.predictions.decoder.bias']
a__ : Optional[Any] = torch.arange(A__ ).expand((1, -1) ) + 2
return orig_state_dict
def A_ ( A__ , A__ , A__ ) -> List[str]:
a__ : List[Any] = torch.load(A__ , map_location='cpu' )['model_state_dict']
a__ : Optional[int] = YosoConfig.from_json_file(A__ )
a__ : List[str] = YosoForMaskedLM(A__ )
a__ : Tuple = convert_checkpoint_helper(config.max_position_embeddings , A__ )
print(model.load_state_dict(A__ ) )
model.eval()
model.save_pretrained(A__ )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase : Optional[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 392 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=0 , ) -> Dict:
'''simple docstring'''
a__ : str = parent
a__ : int = batch_size
a__ : Optional[int] = seq_length
a__ : Any = is_training
a__ : List[Any] = use_input_mask
a__ : Dict = use_token_type_ids
a__ : str = use_labels
a__ : List[Any] = vocab_size
a__ : List[str] = hidden_size
a__ : int = num_hidden_layers
a__ : Any = num_attention_heads
a__ : List[str] = intermediate_size
a__ : Union[str, Any] = hidden_act
a__ : str = hidden_dropout_prob
a__ : Tuple = attention_probs_dropout_prob
a__ : List[Any] = max_position_embeddings
a__ : List[str] = type_vocab_size
a__ : Union[str, Any] = type_sequence_label_size
a__ : Optional[int] = initializer_range
a__ : Any = num_labels
a__ : List[Any] = num_choices
a__ : Optional[int] = scope
a__ : Tuple = projection_dim
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ : List[str] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
a__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
a__ : Tuple = None
if self.use_token_type_ids:
a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__ : Tuple = None
a__ : List[Any] = None
a__ : Tuple = None
if self.use_labels:
a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__ : List[str] = ids_tensor([self.batch_size] , self.num_choices)
a__ : List[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
a__ : List[str] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ : Any = TFDPRContextEncoder(config=lowercase)
a__ : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase)
a__ : Union[str, Any] = model(lowercase , token_type_ids=lowercase)
a__ : Dict = model(lowercase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : str = TFDPRQuestionEncoder(config=lowercase)
a__ : Union[str, Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase)
a__ : Optional[Any] = model(lowercase , token_type_ids=lowercase)
a__ : str = model(lowercase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : Dict = TFDPRReader(config=lowercase)
a__ : Tuple = model(lowercase , attention_mask=lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
a__ : List[str] = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Dict = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__A : Tuple = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
__A : List[str] = False
__A : Any = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : List[Any] = False
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Optional[int] = TFDPRModelTester(self)
a__ : Tuple = ConfigTester(self , config_class=lowercase , hidden_size=37)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowercase)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = TFDPRContextEncoder.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = TFDPRContextEncoder.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : int = TFDPRQuestionEncoder.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : str = TFDPRReader.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Any = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
a__ : Tuple = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]]) # [CLS] hello, is my dog cute? [SEP]
a__ : List[str] = model(lowercase)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
a__ : int = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 392 | 1 |
"""simple docstring"""
UpperCAmelCase : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 0
while number:
        # Increased speed slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Every chain eventually falls into one of two cycles: the 89-cycle
# (89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89) or the fixed point 1.
# Pre-seeding 58 (a member of the 89-cycle) and 1 up front minimizes the
# number of iterations needed to resolve every other chain.
# A dictionary was replaced with a flat array to speed up the solution.
UpperCAmelCase : list[bool | None] = [None] * 1000_0000
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[str] = False
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase_ = chain(next_number(_a ) )
lowercase_ = number_chain
while number < 10_00_00_00:
lowercase_ = number_chain
number *= 10
return number_chain
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 10_00_00_00 ) -> Optional[Any]:
'''simple docstring'''
for i in range(1 , _a ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_a )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 567 |
import argparse
import json
from tqdm import tqdm
def A__ ( ):
'''simple docstring'''
snake_case__ : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--src_path""" , type=_a , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
parser.add_argument(
"""--evaluation_set""" , type=_a , help="""where to store parsed evaluation_set file""" , )
parser.add_argument(
"""--gold_data_path""" , type=_a , help="""where to store parsed gold_data_path file""" , )
snake_case__ : Dict =parser.parse_args()
with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
args.gold_data_path , """w""" ) as gold_file:
snake_case__ : Any =json.load(_a )
for dpr_record in tqdm(_a ):
snake_case__ : int =dpr_record["""question"""]
snake_case__ : str =[context["""title"""] for context in dpr_record["""positive_ctxs"""]]
eval_file.write(question + """\n""" )
gold_file.write("""\t""".join(_a ) + """\n""" )
if __name__ == "__main__":
main()
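# --- Editor's note (data-format sketch) ---
# Each DPR record consumed above looks like (abridged, hypothetical values):
#
#   {"question": "who wrote hamlet",
#    "positive_ctxs": [{"title": "Hamlet", "text": "..."}]}
#
# producing one question per line in the evaluation set and one tab-joined
# line of gold context titles per question in the gold data file.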
| 385 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase_ = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase_ ( lowerCAmelCase: Dict="no" , lowerCAmelCase: str = default_json_config_file , lowerCAmelCase: bool = False )-> Union[str, Any]:
_snake_case : str = Path(lowerCAmelCase )
path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_snake_case : Optional[int] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_snake_case : int = torch.cuda.device_count()
_snake_case : Dict = num_gpus
_snake_case : str = False
if num_gpus > 1:
_snake_case : Optional[int] = 'MULTI_GPU'
else:
_snake_case : Dict = 'NO'
elif is_xpu_available() and use_xpu:
_snake_case : Optional[int] = torch.xpu.device_count()
_snake_case : Dict = num_xpus
_snake_case : Optional[int] = False
if num_xpus > 1:
_snake_case : Dict = 'MULTI_XPU'
else:
_snake_case : Optional[Any] = 'NO'
elif is_npu_available():
_snake_case : List[Any] = torch.npu.device_count()
_snake_case : int = num_npus
_snake_case : int = False
if num_npus > 1:
_snake_case : Optional[int] = 'MULTI_NPU'
else:
_snake_case : Tuple = 'NO'
else:
_snake_case : Optional[Any] = 0
_snake_case : Tuple = True
_snake_case : List[Any] = 1
_snake_case : Tuple = 'NO'
_snake_case : List[str] = ClusterConfig(**lowerCAmelCase )
config.to_json_file(lowerCAmelCase )
return path
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Dict )-> List[str]:
_snake_case : Tuple = parser.add_parser('default' , parents=lowerCAmelCase , help=lowerCAmelCase , formatter_class=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=lowerCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> Dict:
_snake_case : Any = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = feature_extractor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
lowerCamelCase = logging.getLogger(__name__)
lowerCamelCase = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''bertabs'''
def __init__( self : Optional[int] , _UpperCAmelCase : Union[str, Any]=30522 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Dict=6 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : int=512 , _UpperCAmelCase : Any=0.2 , _UpperCAmelCase : List[Any]=6 , _UpperCAmelCase : int=768 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : List[str]=2048 , _UpperCAmelCase : str=0.2 , **_UpperCAmelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
| 82 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _a ( lowerCAmelCase )-> float:
return np.dot(lowerCAmelCase , lowerCAmelCase )
class lowercase_ :
def __init__( self : int , *,
snake_case__ : float = np.inf , snake_case__ : str = "linear" , snake_case__ : float = 0.0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = regularization
SCREAMING_SNAKE_CASE_ = gamma
if kernel == "linear":
SCREAMING_SNAKE_CASE_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
SCREAMING_SNAKE_CASE_ = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: default gamma = 1 / (n_features * X.var())
            # previously it was 1 / n_features
else:
SCREAMING_SNAKE_CASE_ = f'''Unknown kernel: {kernel}'''
raise ValueError(snake_case__ )
def __a ( self : Tuple , snake_case__ : ndarray , snake_case__ : ndarray ):
"""simple docstring"""
return np.dot(snake_case__ , snake_case__ )
def __a ( self : int , snake_case__ : ndarray , snake_case__ : ndarray ):
"""simple docstring"""
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __a ( self : int , snake_case__ : list[ndarray] , snake_case__ : ndarray ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = observations
SCREAMING_SNAKE_CASE_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
        # With l a vector of Lagrange multipliers
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.regularization >= ln >= 0
        #           and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((SCREAMING_SNAKE_CASE_) , ) = np.shape(snake_case__ )
def to_minimize(snake_case__ : ndarray ) -> float:
SCREAMING_SNAKE_CASE_ = 0
((SCREAMING_SNAKE_CASE_) , ) = np.shape(snake_case__ )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(snake_case__ )
SCREAMING_SNAKE_CASE_ = LinearConstraint(snake_case__ , 0 , 0 )
SCREAMING_SNAKE_CASE_ = Bounds(0 , self.regularization )
SCREAMING_SNAKE_CASE_ = minimize(
snake_case__ , np.ones(snake_case__ ) , bounds=snake_case__ , constraints=[ly_contraint] ).x
SCREAMING_SNAKE_CASE_ = l_star
# calculating mean offset of separation plane to points
SCREAMING_SNAKE_CASE_ = 0
for i in range(snake_case__ ):
for j in range(snake_case__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
SCREAMING_SNAKE_CASE_ = s / n
def __a ( self : Any , snake_case__ : ndarray ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , snake_case__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
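# --- Editor's note (illustrative usage sketch) ---
# Assuming the de-obfuscated method names `fit`/`predict` from the original
# implementation, a hard-margin linear SVM on two separable points:
#
#   >>> svm = SVC(kernel="linear")          # hypothetical class name
#   >>> svm.fit([np.array([1.0, 1.0]), np.array([-1.0, -1.0])], np.array([1, -1]))
#   >>> svm.predict(np.array([2.0, 2.0]))
#   1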
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 360 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False , ):
"""simple docstring"""
super().__init__()
snake_case = nn.Embedding(lowerCAmelCase , lowerCAmelCase )
snake_case = nn.Embedding(lowerCAmelCase , lowerCAmelCase )
snake_case = False
snake_case = nn.Dropout(p=lowerCAmelCase )
snake_case = TaConfig(
vocab_size=lowerCAmelCase , d_model=lowerCAmelCase , num_heads=lowerCAmelCase , d_kv=lowerCAmelCase , d_ff=lowerCAmelCase , dropout_rate=lowerCAmelCase , feed_forward_proj=lowerCAmelCase , is_decoder=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , )
snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase ):
snake_case = TaBlock(lowerCAmelCase )
self.encoders.append(lowerCAmelCase )
snake_case = TaLayerNorm(lowerCAmelCase )
snake_case = nn.Dropout(p=lowerCAmelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.token_embedder(lowerCAmelCase )
snake_case = encoder_input_tokens.shape[1]
snake_case = torch.arange(lowerCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase )
snake_case = self.dropout_pre(lowerCAmelCase )
        # invert the attention mask
snake_case = encoder_input_tokens.size()
snake_case = self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase )
for lyr in self.encoders:
snake_case = lyr(lowerCAmelCase , lowerCAmelCase )[0]
snake_case = self.layer_norm(lowerCAmelCase )
return self.dropout_post(lowerCAmelCase ), encoder_inputs_mask
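    # --- Editor's note ---
    # `get_extended_attention_mask` (from ModuleUtilsMixin) broadcasts the
    # (batch, seq) padding mask to (batch, 1, 1, seq) and converts it to an
    # additive mask (0 for kept positions, a large negative value for masked
    # ones), so it can simply be added to the attention scores inside each
    # TaBlock.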
| 104 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Dict:
"""simple docstring"""
def is_in_circle(_UpperCamelCase : float , _UpperCamelCase : float ) -> bool:
snake_case = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_UpperCamelCase ) )
    # The ratio of the circle's area to the square's area is pi/4.
snake_case = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float = 0.0 , _UpperCamelCase : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(_UpperCamelCase , _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : float = 0.0 , _UpperCamelCase : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(_UpperCamelCase : float ) -> float:
return x
snake_case = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print('******************' )
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> None:
"""simple docstring"""
def function_to_integrate(_UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
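# --- Editor's note ---
# Both estimators above are plain Monte Carlo integration:
#   integral over [a, b] of f  ~  (b - a) * mean(f(U_i)),  U_i ~ Uniform(a, b)
# and the pi estimate uses the fact that a uniform point in [-1, 1]^2 lands
# inside the unit circle with probability pi/4, so pi ~ 4 * (hits / trials).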
| 104 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
def UpperCamelCase ( a , a , a ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = Path(a )
__magic_name__ = Path(a )
dest_dir.mkdir(exist_ok=a )
for path in src_dir.iterdir():
__magic_name__ = [x.rstrip() for x in list(path.open().readlines() )][:n]
__magic_name__ = dest_dir.joinpath(path.name )
print(a )
dest_path.open('''w''' ).write('''\n'''.join(a ) )
if __name__ == "__main__":
fire.Fire(minify)
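# --- Editor's note (illustrative invocation) ---
# `fire` maps positional CLI arguments onto `minify(src_dir, dest_dir, n)`,
# so (with a hypothetical script name) keeping the first 100 lines of every
# file looks like:
#
#   python minify_dataset.py ./data ./data_minified 100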
| 432 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = """ClapFeatureExtractor"""
__SCREAMING_SNAKE_CASE :List[Any] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Optional[Any] , a__ : Dict , a__ : Dict ):
super().__init__(a__ , a__ )
def __call__( self : Dict , a__ : List[str]=None , a__ : List[Any]=None , a__ : Any=None , **a__ : Tuple ):
__magic_name__ = kwargs.pop('''sampling_rate''' , a__ )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
__magic_name__ = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if audios is not None:
__magic_name__ = self.feature_extractor(
a__ , sampling_rate=a__ , return_tensors=a__ , **a__ )
if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def snake_case__ ( self : List[Any] , *a__ : str , **a__ : List[str] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def snake_case__ ( self : int , *a__ : Tuple , **a__ : Tuple ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def snake_case__ ( self : Any ):
__magic_name__ = self.tokenizer.model_input_names
__magic_name__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
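    # --- Editor's note (illustrative usage sketch) ---
    # Assuming a CLAP checkpoint on the Hub (e.g. "laion/clap-htsat-unfused"):
    #
    #   import numpy as np
    #   from transformers import ClapProcessor
    #   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    #   inputs = processor(text=["a dog barking"], audios=[np.zeros(48_000)],
    #                      sampling_rate=48_000, return_tensors="pt")
    #   # inputs carries both tokenized text and `input_features` for the audio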
| 432 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A__ ( lowerCAmelCase__ ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ) -> int:
"""simple docstring"""
__lowercase = dataset
__lowercase = process
__lowercase = params
def __len__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : List[str] , _UpperCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.dataset[i]
__lowercase = self.process(_UpperCAmelCase , **self.params )
return processed
class A__ ( lowerCAmelCase__ ):
def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=None ) -> str:
"""simple docstring"""
__lowercase = loader
__lowercase = infer
__lowercase = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating the unrolling logic altogether
__lowercase = None
__lowercase = loader_batch_size
# Internal bookkeeping
__lowercase = None
__lowercase = None
def __len__( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return len(self.loader )
def __iter__( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = iter(self.loader )
return self
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__lowercase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__lowercase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Convert ModelOutput to tuple first
__lowercase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__lowercase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowercase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__lowercase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowercase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__lowercase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
                # For compatibility with other methods within transformers
__lowercase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
                # For compatibility with other methods within transformers
__lowercase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__lowercase = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
__lowercase = self._loader_batch_data.__class__(_UpperCAmelCase )
self._loader_batch_index += 1
return result
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__lowercase = next(self.iterator )
__lowercase = self.infer(_UpperCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_UpperCAmelCase , torch.Tensor ):
__lowercase = processed
else:
__lowercase = list(processed.keys() )[0]
__lowercase = processed[key]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = len(_UpperCAmelCase )
else:
__lowercase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch, so we can't unroll as many
                # elements.
__lowercase = observed_batch_size
# Setting internal index to unwrap the batch
__lowercase = processed
__lowercase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class A__ ( lowerCAmelCase__ ):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        """simple docstring"""
        super().__init__(loader , infer , params )
    def __iter__( self ):
        """simple docstring"""
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self
    def __next__( self ):
        """simple docstring"""
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
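# Illustrative aside (not from the original file): the "flattening lists of
# lists with generators" behaviour described above is essentially
# itertools.chain.from_iterable, sketched here with a stand-in `infer`.
import itertools

def chunk_iterate(items, infer):
    # infer() plays the role of self.infer: one sub-iterable per input item.
    return itertools.chain.from_iterable(infer(item) for item in items)

assert list(chunk_iterate([1, 2, 3], range)) == [0, 0, 1, 0, 1, 2]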
class A__ ( lowerCAmelCase__ ):
    def __iter__( self ):
        """simple docstring"""
        self.iterator = iter(self.loader )
        return self
    def __next__( self ):
        """simple docstring"""
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # This could be the last batch, so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last' )
                accumulator.append(item )
return accumulator
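# Illustrative aside (not from the original file): the accumulation above
# packs processed items until one carries is_last=True; a minimal sketch:
def pack(stream):
    accumulator = []
    for item in stream:
        is_last = item.pop("is_last")
        accumulator.append(item)
        if is_last:
            yield accumulator
            accumulator = []

stream = [{"x": 1, "is_last": False}, {"x": 2, "is_last": True}, {"x": 3, "is_last": True}]
assert list(pack(stream)) == [[{"x": 1}, {"x": 2}], [{"x": 3}]]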
class A__ ( lowerCAmelCase__ ):
    def __init__( self , dataset : Dataset , key : str ):
        """simple docstring"""
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        return self.dataset[i][self.key]
class A__ ( lowerCAmelCase__ ):
    def __init__( self , dataset : Dataset , key_a : str , key_b : str ):
        """simple docstring"""
        self.dataset = dataset
        self.key_a = key_a
        self.key_b = key_b
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        return {"text": self.dataset[i][self.key_a], "text_pair": self.dataset[i][self.key_b]}
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
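# Illustrative aside: the unsqueeze/expand trick used in
# create_and_check_for_multiple_choice above replicates each (batch, seq)
# input across a num_choices axis before feeding the model.
import torch

ids = torch.arange(6).reshape(2, 3)                       # (batch=2, seq=3)
choice_ids = ids.unsqueeze(1).expand(-1, 4, -1).contiguous()
assert choice_ids.shape == (2, 4, 3)                      # (batch, num_choices, seq)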
@require_torch
class AlbertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained ( self ):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding ( self ):
        """simple docstring"""
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
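# Illustrative aside: the slow test above pins a small output slice against
# hard-coded reference values with an absolute tolerance; the numbers below
# are made up purely to show the regression-test pattern.
import torch

reference = torch.tensor([[-0.6513, 1.5035, -0.2766]])
fresh_run = reference + 1e-5                              # tiny numerical drift
assert torch.allclose(fresh_run, reference, atol=1e-4)
assert not torch.allclose(fresh_run + 1.0, reference, atol=1e-4)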
| 688 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState :
"""simple docstring"""
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
    def create ( cls , common , init_noise_sigma , timesteps ):
        '''simple docstring'''
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput ( FlaxSchedulerOutput ):
    """simple docstring"""
    state: DDPMSchedulerState
class FlaxDDPMScheduler ( FlaxSchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state ( self ):
        '''simple docstring'''
        return True
@register_to_config
    def __init__( self , num_train_timesteps = 1_0_0_0 , beta_start = 0.00_01 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.float32 , ):
        '''simple docstring'''
        self.dtype = dtype
    def create_state ( self , common = None ):
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input ( self , state , sample , timestep = None ):
        '''simple docstring'''
        return sample
    def set_timesteps ( self , state , num_inference_steps , shape = () ):
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance ( self , state , t , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step ( self , state , model_output , timestep , sample , key = None , return_dict = True , ):
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                ' or `v_prediction` for the FlaxDDPMScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise ( self , state , original_samples , noise , timesteps , ):
        '''simple docstring'''
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity ( self , state , sample , noise , timesteps , ):
        '''simple docstring'''
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
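# Illustrative aside: a compact, self-contained sketch of the epsilon-
# prediction branch of step() above (formulas (7) and (15) of the DDPM
# paper), using toy beta values rather than the scheduler state.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 2e-2, 10)
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas)

def ddpm_prev_mean(sample, eps, t):
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = jnp.where(t > 0, alphas_cumprod[t - 1], 1.0)
    x0 = (sample - jnp.sqrt(1 - alpha_prod_t) * eps) / jnp.sqrt(alpha_prod_t)
    coef_x0 = jnp.sqrt(alpha_prod_t_prev) * betas[t] / (1 - alpha_prod_t)
    coef_xt = jnp.sqrt(alphas[t]) * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)
    return coef_x0 * x0 + coef_xt * sample                # posterior mean mu_t

x = jnp.ones((2, 2))
print(ddpm_prev_mean(x, jnp.zeros_like(x), 5))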
| 93 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token ( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
@mask_token.setter
    def mask_token ( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
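# Illustrative aside: the two helpers above produce the RoBERTa layout
# `<s> A </s>` for single sequences and `<s> A </s></s> B </s>` for pairs,
# with all-zero token type ids in both cases. A toy check:
bos, eos, seq_a, seq_b = 0, 2, [10, 11], [20]
single = [bos] + seq_a + [eos]
pair = single + [eos] + seq_b + [eos]
assert pair == [0, 10, 11, 2, 2, 20, 2]
assert [0] * len(pair) == [0, 0, 0, 0, 0, 0, 0]           # token_type_ids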
| 93 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version () -> None:
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches (image_tensor , patch_height , patch_width ):
    """simple docstring"""
    requires_backends(torch_extract_patches , ['''torch'''] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
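# Illustrative aside: a shape-level sketch of the unfold call above. With
# kernel == stride, unfold slices a (N, C, H, W) image into non-overlapping
# patches, one column per patch.
import torch

image = torch.randn(1, 3, 32, 48)
patches = torch.nn.functional.unfold(image, (16, 16), stride=(16, 16))
assert patches.shape == (1, 3 * 16 * 16, (32 // 16) * (48 // 16))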
def render_text (text : str , text_size : int = 36 , text_color : str = "black" , background_color : str = "white" , left_padding : int = 5 , right_padding : int = 5 , top_padding : int = 5 , bottom_padding : int = 5 , font_bytes : Optional[bytes] = None , font_path : Optional[str] = None , ) -> Image.Image:
    """simple docstring"""
    requires_backends(render_text , '''vision''' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '''\n'''.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , '''Arial.TTF''' )
    font = ImageFont.truetype(font , encoding='''UTF-8''' , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , background_color ) )
    _, _, text_width, text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''' , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
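# Illustrative aside (assumes a recent Pillow with textbbox): render_text's
# measuring trick sketched standalone -- draw on a 1x1 throwaway canvas only
# to measure the text, then allocate the real image with padding.
from PIL import Image, ImageDraw, ImageFont

font = ImageFont.load_default()
temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), "white"))
left, top, right, bottom = temp_draw.textbbox((0, 0), "hello", font)
canvas = Image.new("RGB", (right - left + 10, bottom - top + 10), "white")
ImageDraw.Draw(canvas).text((5, 5), "hello", fill="black", font=font)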
def render_header (image : np.ndarray , header : str , **kwargs : Any ) -> np.ndarray:
    """simple docstring"""
    requires_backends(render_header , '''vision''' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class Pix2StructImageProcessor ( BaseImageProcessor ):
    model_input_names = ['''flattened_patches''']
    def __init__( self , do_convert_rgb : bool = True , do_normalize : bool = True , patch_size : Dict[str, int] = None , max_patches : int = 2048 , is_vqa : bool = False , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches ( self , image : np.ndarray , max_patches : int , patch_size : dict , **kwargs ):
        """simple docstring"""
        requires_backends(self.extract_flattened_patches , '''torch''')
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height , patch_width = patch_size['''height'''], patch_size['''width''']
        image_height , image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height) , max_patches) , 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width) , max_patches) , 1)
        resized_height = max(num_feasible_rows * patch_height , 1)
        resized_width = max(num_feasible_cols * patch_width , 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=False , antialias=True , ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1 , columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows , 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize ( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs)
    def preprocess ( self , images : ImageInput , header_text : Optional[str] = None , do_convert_rgb : bool = None , do_normalize : Optional[bool] = None , max_patches : Optional[int] = None , patch_size : Optional[Dict[str, int]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('''data_format''' , None) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are ''')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('''A header text must be provided for VQA models.''')
            font_bytes = kwargs.pop('''font_bytes''' , None)
            font_path = kwargs.pop('''font_path''' , None)
            if isinstance(header_text , str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=return_tensors)
        return encoded_outputs
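# Illustrative aside: the resize-to-budget math in extract_flattened_patches
# above picks a scale so that rows * cols of patches roughly fills
# max_patches without exceeding it.
import math

image_height, image_width, ph, pw, max_patches = 480, 640, 16, 16, 2048
scale = math.sqrt(max_patches * (ph / image_height) * (pw / image_width))
rows = max(min(math.floor(scale * image_height / ph), max_patches), 1)
cols = max(min(math.floor(scale * image_width / pw), max_patches), 1)
assert rows * cols <= max_patches                          # 39 * 52 = 2028 here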
| 169 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    def __init__( self , img : np.ndarray , dst_width : int , dst_height : int):
        """simple docstring"""
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = np.ones((self.dst_h, self.dst_w, 3) , np.uint8) * 255
    def process( self ):
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x( self , x : int):
        """simple docstring"""
        return int(self.ratio_x * x)
    def get_y( self , y : int):
        """simple docstring"""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w , dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
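# Follow-up design note (not part of the original script): the per-pixel
# loops above are easy to read but slow; the same nearest-neighbour mapping
# can be done in one vectorized numpy indexing step.
import numpy as np

def resize_nn(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]

assert resize_nn(np.zeros((600, 800, 3), np.uint8), 800, 600).shape == (600, 800, 3)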
| 169 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp ( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id ( self ):
        """simple docstring"""
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """[MASK]""" )
        self.assertEqual(len(vocab_keys ) , 1_0_0_4 )
    def test_vocab_size ( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
    def test_rust_and_python_full_tokenizers ( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer ( self ):
        """simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer ( self ):
        """simple docstring"""
        return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
    def test_tokenization_base_easy_symbols ( self ):
        """simple docstring"""
        symbols = """Hello World!"""
        original_tokenizer_encodings = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols ( self ):
        """simple docstring"""
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        # fmt: off
        original_tokenizer_encodings = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model ( self ):
        """simple docstring"""
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="""original_full""" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens ( self ):
        """simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
        decoded_text = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
        self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = {"""input_ids""": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
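# Illustrative aside: the <unk> behaviour exercised above, as a generic toy
# round-trip (a hypothetical vocab, not the fixture sentencepiece model):
# ids map back to tokens exactly, except out-of-vocabulary pieces collapse
# to the unknown token.
vocab = {"<unk>": 0, "▁I": 8, "▁was": 21, "9": 5}
inv = {i: t for t, i in vocab.items()}
ids = [vocab.get(t, vocab["<unk>"]) for t in ["▁I", "▁was", "9", "é"]]
assert [inv[i] for i in ids] == ["▁I", "▁was", "9", "<unk>"]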
| 566 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class MarianTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp ( self ):
        """simple docstring"""
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self , **kwargs ) -> MarianTokenizer:
        """simple docstring"""
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ):
        """simple docstring"""
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id ( self ):
        """simple docstring"""
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 9 )
    def test_vocab_size ( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de ( self ):
        """simple docstring"""
        en_de_tokenizer = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        batch = en_de_tokenizer(["""I am a small frog"""] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("""*""" )]
        self.assertIn("""source.spm""" , contents )
        MarianTokenizer.from_pretrained(save_dir )
    def test_outputs_not_longer_than_maxlen ( self ):
        """simple docstring"""
        tok = self.get_tokenizer()
        batch = tok(
            ["""I am a small frog""" * 1_0_0_0, """I am a small frog"""] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
    def test_outputs_can_be_shorter ( self ):
        """simple docstring"""
        tok = self.get_tokenizer()
        batch_smaller = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def test_tokenizer_integration_separate_vocabs(self):
        """Round-trip source and target text through a Marian checkpoint with separate vocabs."""
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 566 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
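

# Illustrative usage sketch (not part of the original file; the parameter values
# below are arbitrary examples): instantiate the config and read an attribute
# resolved through attribute_map.
if __name__ == "__main__":
    config = TransfoXLConfig(d_model=128, d_embed=128, n_layer=2, n_head=4)
    print(config.hidden_size)  # 128, aliased to d_model via attribute_map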
| 716 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # Note: the ':' characters in this timestamp are not valid in Windows file names.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 303 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
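
    # Quick demonstration (not part of the original file; the sample points are
    # arbitrary illustrative values):
    print(manhattan_distance([1, 1], [2, 2]))  # 2.0
    print(manhattan_distance_one_liner([1, 5, 5], [6, 5, 5]))  # 5.0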
| 190 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar('T')
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
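

# Illustrative usage (not part of the original file; the edges and weights are
# arbitrary example values): build a small graph and run the function above.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "d", 5)
    g.add_edge("a", "c", 15)
    dist, parent = prims_algo(g)
    print(parent)  # predecessor of each node in the resulting spanning tree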
| 190 | 1 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCamelCase = """bert-base-cased"""
lowerCamelCase = """fp16"""
lowerCamelCase = """bf16"""
lowerCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 14 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 14 | 1 |
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
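
    # Illustration (not part of the original file; the loan figures are arbitrary):
    # 25,000 borrowed at 10% annual interest, repaid monthly over 2 years.
    print(equated_monthly_installments(25_000, 0.10, 2))  # roughly 1153.6 per month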
| 89 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        """VQ-VAE style autoencoder: Encoder -> vector quantizer -> Decoder."""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
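

# Minimal smoke-test sketch (not part of the original file; the tiny shapes and
# channel counts below are arbitrary values chosen only to exercise the forward pass):
if __name__ == "__main__":
    model = VQModel(block_out_channels=(32,), norm_num_groups=32, num_vq_embeddings=16, sample_size=8)
    sample = torch.randn(1, 3, 8, 8)
    print(model(sample).sample.shape)  # expected: torch.Size([1, 3, 8, 8])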
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 685 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 685 | 1 |
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 74 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
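

# Illustrative usage sketch (not part of the original file; requires the public
# "xlnet-base-cased" checkpoint to be downloadable):
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tokenizer("Hello world").input_ids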
| 207 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR is a character-level tokenizer: every character is a token.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
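

# Character-level behavior sketch (not part of the original file; assumes a
# local vocab.json mapping single characters to ids):
#   tok = MgpstrTokenizer("vocab.json")
#   tok._tokenize("abc")  # -> ["a", "b", "c"]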
| 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 15 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicates identical initializer tensors in an ONNX file and saves an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Sizes per ONNX data type: 1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model | 249 | """simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class _A :
"""simple docstring"""
def lowercase ( self : List[Any] , A_ : Optional[Any] , A_ : int ) -> Any:
pass
def lowercase ( self : List[Any] ) -> Union[str, Any]:
pass
def lowercase ( self : Any ) -> Union[str, Any]:
pass
def lowercase ( self : List[str] , A_ : int , A_ : Tuple , A_ : List[Any] , A_ : Optional[int] , A_ : Tuple=None , **A_ : str ) -> Tuple:
__snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase ( self : List[str] , A_ : Dict , A_ : Union[str, Any] , A_ : int , A_ : int , A_ : Union[str, Any]=None , **A_ : Union[str, Any] ) -> List[str]:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : Tuple , A_ : Any , A_ : Dict , A_ : Any , A_ : Optional[Any] , A_ : Optional[int]=None , **A_ : str ) -> Optional[Any]:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = {'''vision_model''': vision_model, '''text_model''': text_model}
__snake_case = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : str , A_ : str , A_ : Optional[Any] , A_ : Any , A_ : Optional[int] , A_ : Tuple=None , **A_ : int ) -> int:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
__snake_case = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
__snake_case = TFVisionTextDualEncoderModel.from_pretrained(A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
__snake_case = after_output[0].numpy()
__snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
def lowercase ( self : List[str] , A_ : str , A_ : Dict , A_ : List[str] , A_ : str , A_ : int=None , **A_ : Union[str, Any] ) -> List[str]:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(
input_ids=A_ , pixel_values=A_ , attention_mask=A_ , output_attentions=A_ )
__snake_case = output.vision_model_output.attentions
self.assertEqual(len(A_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = to_atuple(vision_model.config.image_size )
__snake_case = to_atuple(vision_model.config.patch_size )
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__snake_case = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__snake_case = output.text_model_output.attentions
self.assertEqual(len(A_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase ( self : Dict , A_ : np.ndarray , A_ : np.ndarray , A_ : float ) -> Union[str, Any]:
__snake_case = np.abs((a - b) ).max()
self.assertLessEqual(A_ , A_ , f"Difference between torch and flax is {diff} (>= {tol})." )
def lowercase ( self : List[str] ) -> Optional[int]:
__snake_case = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**A_ )
def lowercase ( self : Optional[int] ) -> int:
__snake_case = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A_ )
def lowercase ( self : List[str] ) -> Union[str, Any]:
__snake_case = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A_ )
def lowercase ( self : List[str] ) -> int:
__snake_case = self.prepare_config_and_inputs()
self.check_save_load(**A_ )
def lowercase ( self : Optional[int] ) -> List[str]:
__snake_case = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A_ )
@slow
def lowercase ( self : Any ) -> Any:
__snake_case , __snake_case = self.get_pretrained_model_and_inputs()
__snake_case = model_a(**A_ )
__snake_case = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A_ )
__snake_case = TFVisionTextDualEncoderModel.from_pretrained(A_ )
__snake_case = model_a(**A_ )
__snake_case = after_outputs[0].numpy()
__snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
@require_tf
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Tuple ) -> List[str]:
__snake_case = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
__snake_case = 13
__snake_case = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__snake_case = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__snake_case = random_attention_mask([batch_size, 4] )
__snake_case = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase ( self : str , A_ : Optional[int] , A_ : Tuple ) -> str:
__snake_case = TFViTModel(A_ , name='''vision_model''' )
__snake_case = TFBertModel(A_ , name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Union[str, Any] ) -> int:
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__snake_case = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
__snake_case = 13
__snake_case = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__snake_case = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__snake_case = random_attention_mask([batch_size, 4] )
__snake_case = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase ( self : Dict , A_ : Union[str, Any] , A_ : Tuple , A_ : Union[str, Any] , A_ : str , A_ : List[Any]=None , **A_ : List[Any] ) -> int:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(
input_ids=A_ , pixel_values=A_ , attention_mask=A_ , output_attentions=A_ )
__snake_case = output.vision_model_output.attentions
self.assertEqual(len(A_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__snake_case = to_atuple(vision_model.config.image_size )
__snake_case = to_atuple(vision_model.config.patch_size )
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__snake_case = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__snake_case = output.text_model_output.attentions
self.assertEqual(len(A_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase ( self : str , A_ : Union[str, Any] , A_ : Any ) -> Tuple:
__snake_case = TFDeiTModel(A_ , name='''vision_model''' )
__snake_case = TFRobertaModel(A_ , name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3)) | 564 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
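# Two traced examples of the renaming rules above (the input keys are
# hypothetical checkpoint entries, shown only for illustration):
#
#   rename_key("patch_embed.0.weight", num_meta4D_last_stage=5)
#   -> "efficientformer.patch_embed.convolution1.weight"
#   rename_key("norm.weight", num_meta4D_last_stage=5)
#   -> "efficientformer.layernorm.weight"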
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 526 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    # Ernie-M model doesn't use token_type embeddings.
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # table mapping full-width characters (U+FF01..U+FF5E) to their half-width
        # counterparts; clean_text and get_offset_mapping rely on it
        self.SP_CHAR_MAPPING = {chr(ch): chr(ch - 65248) for ch in range(65281, 65375)}

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
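    # Illustrative call (the spans depend entirely on the loaded SentencePiece
    # model, so the output below is hypothetical):
    #   tokenizer.get_offset_mapping("Hello world")
    #   -> [(0, 5), (6, 11)]  # one (start, end) character span per token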
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
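    # The re-splitting above separates CJK characters, punctuation, and digit
    # runs inside each piece. Hypothetical pieces (real ones depend on the
    # SentencePiece model) would split like this:
    #   "abc123def" -> ["abc", "123", "def"]
    #   "你好!"      -> ["你", "好", "!"]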
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
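    # Example layout for a pair with len(A) == 2 and len(B) == 3:
    #   tokens:   [CLS] a1 a2 [SEP] [SEP] b1 b2 b3 [SEP]
    #   type ids:   0    0  0   0     1   1  1  1    1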
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 526 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
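    # For task="seq2seq-lm" with use_past=True, fill_with_past_key_values_
    # completes the mapping above with one entry per decoder layer; under the
    # usual seq2seq naming these look like "past_key_values.0.decoder.key"
    # with a dynamic batch axis (naming recalled from the ONNX config helpers,
    # so treat it as indicative rather than exact).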
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 182 |
"""simple docstring"""
_A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 182 | 1 |
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file) | 705 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy()) | 228 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
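# Minimal usage sketch with toy data. The script uses the TF 1.x graph API
# (tf.Session, tf.placeholder); under TensorFlow 2.x, import it via
# tensorflow.compat.v1 with v2 behavior disabled:
#   data = [array([1.0, 1.0]), array([1.5, 2.0]), array([8.0, 8.0]), array([9.0, 11.0])]
#   centroids, assignments = TFKMeansCluster(data, noofclusters=2)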
| 143 |
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences,
        )
| 675 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
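    # e.g. with the defaults above (embed_dim=96, depths of length 4):
    # hidden_size = int(96 * 2 ** 3) = 768, the channel width after the last stage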
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 427 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 427 | 1 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list:
    """
    Return the list of all primes up to and including num,
    using the Sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
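# Trace for num = 10: p=2 strikes 4, 6, 8 and 10; p=3 strikes 9; the loop
# stops at p=4 since 4 * 4 > 10, leaving [2, 3, 5, 7] marked prime.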
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 617 | '''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = '''past_encoder_sequence + sequence'''
            common_inputs["decoder_input_ids"] = {0: '''batch'''}
            common_inputs["decoder_attention_mask"] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs["decoder_input_ids"] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs["decoder_attention_mask"] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
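# Minimal usage sketch (added for illustration, not from the original file):
# `attribute_map` aliases the generic config names onto T5's own attributes, so
# the generic names transparently read the T5-specific ones.
config = T5Config()
assert config.hidden_size == config.d_model == 512
assert config.num_hidden_layers == config.num_layers == 6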
| 152 | 0 |
def solution(n=4_000_000):
    """Sum the even-valued Fibonacci terms that do not exceed `n` (Project Euler 2)."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
    print(f"{solution() = }")
| 356 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """YAML SafeLoader that rejects mappings with duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
A : Optional[Any] = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
A : Union[str, Any] = ap.parse_args()
A : Union[str, Any] = Path(args.readme_filepath)
A : List[Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
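# Illustrative round-trip (a sketch added here, not from the original file):
# parsing a YAML block yields a plain mapping, and dashed keys listed in
# _FIELDS_WITH_DASHES are normalized to underscores on load.
meta = DatasetMetadata.from_yaml_string("language:\n- en\ntrain-eval-index:\n- config: default\n")
assert meta["language"] == ["en"]
assert "train_eval_index" in meta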
| 356 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
"""Scrape job titles and company names from Indeed search results."""
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
        print(f'Job {i:>2} is {job[0]} at {job[1]}')
| 158 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 587 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 587 | 1 |
"""Equivalent resistance of resistor networks."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance in parallel: Req = 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance in series: Req = R1 + R2 + ...."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
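# Worked example (illustrative): two 2-ohm resistors in parallel halve the
# resistance, while resistors in series simply add.
assert resistor_parallel([2.0, 2.0]) == 1.0
assert resistor_series([2.0, 3.0]) == 5.0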
| 199 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = '''wav2vec2'''
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2,
        num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # product of the conv strides; with the defaults (5, 2, 2, 2, 2, 2, 2)
        # this is 5 * 2**6 = 320 input samples per output logit frame
        return functools.reduce(operator.mul, self.conv_stride, 1) | 30 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 712 | """simple docstring"""
import math


def is_prime(number: int) -> bool:
    '''Return True if `number` is prime, using 6k ± 1 trial division.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    '''Return the `nth` prime number (Project Euler 7).'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(F'{solution() = }')
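# Sanity check from the Project Euler problem statement (added for illustration):
# the sixth prime is 13.
assert solution(6) == 13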
| 237 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname, version, pattern):
    """Update the version of `fname` to `version` using the regex registered for `pattern`."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc',
                'https://huggingface.co/docs/transformers/model_doc',
            )
        index += 1

    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.')
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.')
    global_version_update(version)
    print('Cleaning main README, don\'t forget to run `make fix-copies`.')
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
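# Example invocations (illustrative; the script path utils/release.py is an
# assumption based on the transformers repository layout):
#   python utils/release.py                  # prepare a minor release
#   python utils/release.py --patch          # prepare a patch release
#   python utils/release.py --post_release   # bump back to the next dev version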
| 4 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
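# Example launches (illustrative; the file name memory.py is an assumption based
# on the accelerate examples directory mentioned above):
#   python memory.py --cpu
#   accelerate launch memory.py --mixed_precision fp16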
| 620 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase_ = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCAmelCase_ = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCAmelCase_ = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string'),
                    'references': datasets.Value('string'),
                }), homepage='https://github.com/hendrycks/math', codebase_urls=['https://github.com/hendrycks/math'], )

    def _compute(self, references, predictions):
        """Returns the accuracy after canonicalizing predictions and references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 720 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n` (Project Euler 8)."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for minimum vertex cover: repeatedly pick the vertex covering the most uncovered edges."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 423 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
        feature_extractor = SpeechTaFeatureExtractor()
        processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["model"] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 297 | 0 |
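For context, the conversion script above is meant to be driven from the command line. A minimal sketch of an invocation, where the script file name and all local paths are hypothetical placeholders:

# Hypothetical example run of the converter above; adjust paths to your setup.
# python convert_speecht5_checkpoint.py \
#     --task s2t \
#     --checkpoint_path ./speecht5_asr.pt \
#     --vocab_path ./spm_char.model \
#     --pytorch_dump_folder_path ./speecht5_asr_converted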
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    '''simple docstring'''

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = "\""
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__( self ):
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs( self ):
        '''simple docstring'''
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self , pa_table ):
        '''simple docstring'''
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                raise
| 712 |
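A builder of this shape is what backs the packaged "csv" loader in datasets: its config simply forwards keyword arguments to pandas.read_csv. A minimal usage sketch, assuming a local data.csv file (the file name is a placeholder):

import datasets

# kwargs such as sep flow through CsvConfig into pandas.read_csv
dset = datasets.load_dataset("csv", data_files="data.csv", sep=",")
print(dset["train"].features)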
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 27 | 0 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-m""" , """--pretrained_model_name_or_path""" , type=str , default=None , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
    parser.add_argument(
        """-c""" , """--caption""" , type=str , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
    parser.add_argument(
        """-n""" , """--images_num""" , type=int , default=4 , help="""How much images to generate.""" , )
    parser.add_argument(
        """-s""" , """--seed""" , type=int , default=42 , help="""Seed for random process.""" , )
    parser.add_argument(
        """-ci""" , """--cuda_id""" , type=int , default=0 , help="""cuda_id.""" , )
    args = parser.parse_args()
    return args
def image_grid(imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError("""The specified number of rows and columns are not correct.""" )
    w , h = imgs[0].size
    grid = Image.new("""RGB""" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images(pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid , images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 567 |
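The Intel Neural Compressor text-to-image demo above is likewise a CLI script. A hypothetical invocation (the script name and model directory are placeholders; the directory should contain a best_model.pt for the quantized UNet branch to be taken):

# python text2images.py \
#     --pretrained_model_name_or_path ./distilled-stable-diffusion \
#     --caption "robotic cat with wings" \
#     --images_num 4 --seed 42 --cuda_id 0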
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 567 | 1 |
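With an import structure like the one above, heavy submodules are only loaded on first attribute access through _LazyModule. A minimal sketch of the deferred behavior, assuming a transformers installation that ships XGLM:

from transformers import XGLMConfig  # only configuration_xglm is imported at this point

config = XGLMConfig()
print(config.model_type)  # "xglm"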
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
    _import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""})
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
    language: str = field(
        default=None , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""})
    train_language: Optional[str] = field(
        default=None , metadata={"""help""": """Train language if it is different from the evaluation language."""})
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    do_lower_case: Optional[bool] = field(
        default=None , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_xnli""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["""label"""].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["""label"""].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["""label"""].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["""premise"""] , examples["""hypothesis"""] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
    metric = evaluate.load("""xnli""" )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Prediction
    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix="""predict""" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["""predict_samples"""] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("""predict""" , metrics )
        trainer.save_metrics("""predict""" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , """predictions.txt""" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , """w""" ) as writer:
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 12 | 0 |
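The XNLI fine-tuning script above follows the standard transformers example layout. A sketch of a typical invocation (the model id and output directory are placeholders):

# python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli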
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    @property
    def tgt_lang( self ) -> str:
        """simple docstring"""
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang: str ) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index: int ) -> str:
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d: Dict ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ) -> Any:
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path: str ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 ) | 6 |
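A short usage sketch of the tokenizer above via the published checkpoint (this assumes Hub access and the transformers implementation of the same class):

from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tok("hello world").input_ids  # eos is appended; language prefix tokens are used only when lang_codes is set
print(tok.decode(ids))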
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        """simple docstring"""
        if text is None and query_images is None and images is None:
            raise ValueError(
                """You have to specify at least one text or query image or image. All three cannot be none.""" )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [""" """] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("""Target return tensor type could not be returned""" )
            encoding = BatchEncoding()
            encoding["""input_ids"""] = input_ids
            encoding["""attention_mask"""] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["""query_pixel_values"""] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor | 6 | 1 |
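A usage sketch for the processor above, combining nested text queries with an image (this assumes the transformers implementation and uses a blank stand-in image):

from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))  # stand-in image
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values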
'''simple docstring'''
def find_min( arr ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
| 705 |
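The function above is the classic minimum subset-sum partition DP: dp[i][j] records whether some subset of the first i elements sums to j, and the answer is s - 2*j for the largest feasible j <= s/2. A quick sanity check, using the function name as fixed above:

# [1, 6, 11, 5] splits into {1, 5, 6} and {11}: sums 12 and 11, so the difference is 1.
print(find_min([1, 6, 11, 5]))  # 1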
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_jukebox"""] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 0 |
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 239 |
from importlib import import_module
from .logging import get_logger
UpperCAmelCase : Union[str, Any] = get_logger(__name__)
class _PatchedModuleObj:
    """Set the attributes of a module on this object, so individual ones can be patched."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
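# --- Usage sketch (not part of the original module) -------------------------------
# A minimal demonstration of patch_submodule on a stand-in module object; the module
# name "demo_mod" is made up purely for illustration.
if __name__ == "__main__":
    import os
    import types

    demo_mod = types.ModuleType("demo_mod")  # stands in for a module that did `import os`
    demo_mod.os = os

    def fake_join(*args):
        return "patched:" + "/".join(args)

    with patch_submodule(demo_mod, "os.path.join", fake_join):
        assert demo_mod.os.path.join("a", "b") == "patched:a/b"
    # the patch is rolled back on exit
    assert demo_mod.os.path.join("a", "b") == os.path.join("a", "b")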
| 239 | 1 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
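# Spot checks for the markdown prefixes produced above -- a sketch, not in the original:
assert md_prefix(0) == "\n##"   # a new top-level section
assert md_prefix(2) == "    *"  # a bullet nested two levels deep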
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
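# Usage outside the test harness -- a sketch (downloads the underlying QA model on first run):
#   tool = load_tool("text-question-answering")
#   print(tool(TEXT, "When did Hugging Face raise its Series B?"))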
| 629 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a parlai Blenderbot checkpoint into the transformers layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
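# Spot checks for the key renaming above -- a sketch using made-up parlai-style keys:
assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert (
    rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
    == "encoder.layers.0.self_attn.q_proj.weight"
)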
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 558 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint into the transformers layout."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
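# Example invocation -- a sketch; the script name and paths below are placeholders:
#   python convert_sew_checkpoint.py --checkpoint_path sew_tiny.pt \
#       --pytorch_dump_folder_path ./sew-tiny --config_path ./sew_config.json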
| 558 | 1 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'{solution() = }')
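# Brute-force cross-check of the totient values used above -- a sketch, not part of
# the original solution.
def _phi_brute(n: int) -> int:
    from math import gcd

    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


assert sum(_phi_brute(n) for n in range(2, 11)) == 31  # agrees with solution(10)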
| 463 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
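# For reference: solution() evaluates to 5777, the smallest odd composite that cannot
# be written as a prime plus twice a square (the known counterexample to Goldbach's
# other conjecture).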
| 463 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the value at `start` down until the min-heap property is restored."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased value at `index` up to its correct place."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
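# Worked example (a sketch): for the weighted triangle
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
# prisms_algorithm(adjacency_list) returns [(0, 1), (1, 2)] -- the two cheapest edges.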
| 567 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 567 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
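# Minimal usage sketch (not part of the module; model weights download on first run):
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool("Hello from the text reader tool.")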
| 711 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the distance between the two endpoints of two vectors, using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the distance between the two endpoints of two vectors, without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
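# Spot check with the 3-4-5 right triangle -- a sketch, not part of the original:
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0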
if __name__ == "__main__":
    def benchmark() -> None:
        """Benchmark the pure-Python implementation against the numpy one."""
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""", number=10_000, globals=globals(), ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""", number=10_000, globals=globals(), ) )
benchmark()
| 483 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 428 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 428 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 207 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for token classification.

    Args:
        guid: Unique id for the example.
        words: list. The words of the sequence.
        labels: (Optional) list. The labels for each word of the sequence.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """
    A single set of features of data. Property names are the same names as the corresponding inputs to a model.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[Any] = 42
_lowerCamelCase :List[Any] = nn.CrossEntropyLoss().ignore_index
def __init__( self : Optional[Any] , UpperCamelCase : TokenClassificationTask , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : Tuple=False , UpperCamelCase : Split = Split.train , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = os.path.join(
snake_case_ , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(snake_case_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase__ : Any = cached_features_file + '''.lock'''
with FileLock(snake_case_ ):
if os.path.exists(snake_case_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
lowerCAmelCase__ : str = torch.load(snake_case_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
lowerCAmelCase__ : Dict = token_classification_task.read_examples_from_file(snake_case_ , snake_case_ )
# TODO clean up all this to leverage built-in features of tokenizers
lowerCAmelCase__ : List[str] = token_classification_task.convert_examples_to_features(
                    snake_case_ ,
                    snake_case_ ,
                    snake_case_ ,
                    snake_case_ ,
                    cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,
                    cls_token=tokenizer.cls_token ,
                    cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,
                    sep_token=tokenizer.sep_token ,
                    sep_token_extra=snake_case_ ,
                    pad_on_left=bool(tokenizer.padding_side == """left""" ) ,
                    pad_token=tokenizer.pad_token_id ,
                    pad_token_segment_id=tokenizer.pad_token_type_id ,
                    pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , snake_case_ )
def __len__( self : Tuple ) -> Dict:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
return self.features[i]
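# Minimal usage sketch (illustrative; assumes the dataset class above keeps its
# original name `TokenClassificationDataset` and that a task, tokenizer and
# label list already exist):
#
#     dataset = TokenClassificationDataset(
#         token_classification_task=task,
#         data_dir="./data",
#         tokenizer=tokenizer,
#         labels=labels,
#         model_type="bert",
#         max_seq_length=128,
#         mode=Split.train,
#     )
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32)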
if is_tf_available():
import tensorflow as tf
class _lowerCamelCase :
_lowerCamelCase :Union[str, Any] = 42
_lowerCamelCase :Dict = -100
def __init__( self : Any , UpperCamelCase : TokenClassificationTask , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : List[str]=False , UpperCamelCase : Split = Split.train , ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = token_classification_task.read_examples_from_file(snake_case_ , snake_case_ )
# TODO clean up all this to leverage built-in features of tokenizers
lowerCAmelCase__ : int = token_classification_task.convert_examples_to_features(
                    snake_case_ ,
                    snake_case_ ,
                    snake_case_ ,
                    snake_case_ ,
                    cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,
                    cls_token=tokenizer.cls_token ,
                    cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,
                    sep_token=tokenizer.sep_token ,
                    sep_token_extra=snake_case_ ,
                    pad_on_left=bool(tokenizer.padding_side == """left""" ) ,
                    pad_token=tokenizer.pad_token_id ,
                    pad_token_segment_id=tokenizer.pad_token_type_id ,
                    pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase__ : int = tf.data.Dataset.from_generator(
                snake_case_ , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32}, tf.int64) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowerCAmelCase__ : Optional[Any] = tf.data.Dataset.from_generator(
                snake_case_ , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32, """token_type_ids""": tf.int32}, tf.int64) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Union[str, Any] , UpperCamelCase : List[Any] ) -> List[str]:
"""simple docstring"""
return self.features[i]
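# Usage sketch (illustrative): `get_dataset` returns a `tf.data.Dataset` with a
# known cardinality, which can be batched and fed to Keras directly, e.g.
#     tf_dataset = dataset.get_dataset().batch(32)
#     model.fit(tf_dataset, epochs=3)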
| 299 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : Optional[Any] = tempfile.mkdtemp()
snake_case__ : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
snake_case__ : List[str] = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
'''do_convert_rgb''': True,
}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(snake_case_ , snake_case_ )
def __magic_name__ ( self : Tuple , **snake_case_ : Tuple ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __magic_name__ ( self : Any , **snake_case_ : str ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def __magic_name__ ( self : List[str] , **snake_case_ : Optional[int] ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def __magic_name__ ( self : str ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
        snake_case__ : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
snake_case__ : List[str] = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Optional[Any] = self.get_rust_tokenizer()
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : int = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
snake_case__ : int = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Union[str, Any] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
snake_case__ : List[Any] = self.get_image_processor(do_normalize=snake_case_ )
snake_case__ : Optional[int] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=snake_case_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case__ : Dict = self.prepare_image_inputs()
snake_case__ : Optional[Any] = image_processor(snake_case_ , return_tensors='''np''' )
snake_case__ : str = processor(images=snake_case_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : Any = self.get_image_processor()
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case__ : Optional[int] = '''Alexandra,T-shirt的价格是15便士。'''
snake_case__ : List[Any] = processor(text=snake_case_ )
snake_case__ : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Tuple = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case__ : int = '''Alexandra,T-shirt的价格是15便士。'''
snake_case__ : str = self.prepare_image_inputs()
snake_case__ : Optional[int] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : Dict = processor.batch_decode(snake_case_ )
snake_case__ : int = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : Dict = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case__ : Any = '''Alexandra,T-shirt的价格是15便士。'''
snake_case__ : Any = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 347 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] = logging.get_logger("transformers.models.speecht5")
def _UpperCAmelCase ( checkpoint , hf_model , config ):
"""simple docstring"""
hf_model.apply_weight_norm()
lowerCAmelCase__ = checkpoint["""input_conv.weight_g"""]
lowerCAmelCase__ = checkpoint["""input_conv.weight_v"""]
lowerCAmelCase__ = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase__ = checkpoint[f"""upsamples.{i}.1.weight_g"""]
lowerCAmelCase__ = checkpoint[f"""upsamples.{i}.1.weight_v"""]
lowerCAmelCase__ = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
lowerCAmelCase__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
lowerCAmelCase__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
lowerCAmelCase__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
lowerCAmelCase__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
lowerCAmelCase__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
lowerCAmelCase__ = checkpoint["""output_conv.1.weight_g"""]
lowerCAmelCase__ = checkpoint["""output_conv.1.weight_v"""]
lowerCAmelCase__ = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
"""simple docstring"""
if config_path is not None:
lowerCAmelCase__ = SpeechTaHifiGanConfig.from_pretrained(UpperCAmelCase__ )
else:
lowerCAmelCase__ = SpeechTaHifiGanConfig()
lowerCAmelCase__ = SpeechTaHifiGan(UpperCAmelCase__ )
lowerCAmelCase__ = torch.load(UpperCAmelCase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase__ = np.load(UpperCAmelCase__ )
lowerCAmelCase__ = stats[0].reshape(-1 )
lowerCAmelCase__ = stats[1].reshape(-1 )
lowerCAmelCase__ = torch.from_numpy(UpperCAmelCase__ ).float()
lowerCAmelCase__ = torch.from_numpy(UpperCAmelCase__ ).float()
model.save_pretrained(UpperCAmelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
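# Example invocation (file names are placeholders):
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan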
| 714 | """simple docstring"""
def _UpperCAmelCase ( a , b ):
    """simple docstring"""
    while b:
        a , b = b, a % b
    return a
def _UpperCAmelCase ( a , b ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
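# Worked example: euclidean_gcd(48, 18)
#   (a, b): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> returns 6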
def _UpperCAmelCase ( ):
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 674 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = "char"
_SCREAMING_SNAKE_CASE : Union[str, Any] = "bpe"
_SCREAMING_SNAKE_CASE : List[Any] = "wp"
lowercase__ =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["image_processor", "char_tokenizer"]
_SCREAMING_SNAKE_CASE : Optional[Any] = "ViTImageProcessor"
_SCREAMING_SNAKE_CASE : Tuple = "MgpstrTokenizer"
def __init__(self : Optional[int] , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , **snake_case_ : List[Any] ):
__a : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , snake_case_ , )
__a : str = kwargs.pop('''feature_extractor''' )
__a : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
__a : Union[str, Any] = tokenizer
__a : Union[str, Any] = AutoTokenizer.from_pretrained('''gpt2''' )
__a : str = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(snake_case_ , snake_case_ )
def __call__(self : List[Any] , snake_case_ : Optional[int]=None , snake_case_ : Dict=None , snake_case_ : Tuple=None , **snake_case_ : Any ):
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__a : int = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None:
__a : Optional[Any] = self.char_tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__a : str = encodings['''input_ids''']
return inputs
def lowerCAmelCase (self : Any , snake_case_ : Dict ):
__a , __a , __a : List[str] = sequences
__a : Dict = char_preds.size(0 )
__a , __a : List[str] = self._decode_helper(snake_case_ , '''char''' )
__a , __a : str = self._decode_helper(snake_case_ , '''bpe''' )
__a , __a : Optional[int] = self._decode_helper(snake_case_ , '''wp''' )
__a : Dict = []
__a : List[Any] = []
for i in range(snake_case_ ):
__a : Optional[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
__a : Any = [char_strs[i], bpe_strs[i], wp_strs[i]]
__a : Optional[int] = scores.index(max(snake_case_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__a : Dict = {}
__a : str = final_strs
__a : int = final_scores
__a : int = char_strs
__a : Union[str, Any] = bpe_strs
__a : Any = wp_strs
return out
def lowerCAmelCase (self : int , snake_case_ : List[str] , snake_case_ : str ):
if format == DecodeType.CHARACTER:
__a : Dict = self.char_decode
__a : Optional[Any] = 1
__a : Any = '''[s]'''
elif format == DecodeType.BPE:
__a : Union[str, Any] = self.bpe_decode
__a : Union[str, Any] = 2
__a : List[str] = '''#'''
elif format == DecodeType.WORDPIECE:
__a : Any = self.wp_decode
__a : List[str] = 1_0_2
__a : str = '''[SEP]'''
else:
raise ValueError(f"Format {format} is not supported." )
__a , __a : Optional[Any] = [], []
__a : Optional[Any] = pred_logits.size(0 )
__a : Dict = pred_logits.size(1 )
__a , __a : Optional[Any] = pred_logits.topk(1 , dim=-1 , largest=snake_case_ , sorted=snake_case_ )
__a : int = preds_index.view(-1 , snake_case_ )[:, 1:]
__a : Dict = decoder(snake_case_ )
__a , __a : Union[str, Any] = torch.nn.functional.softmax(snake_case_ , dim=2 ).max(dim=2 )
__a : Tuple = preds_max_prob[:, 1:]
for index in range(snake_case_ ):
__a : Any = preds_str[index].find(snake_case_ )
__a : List[str] = preds_str[index][:pred_eos]
__a : int = preds_index[index].cpu().tolist()
__a : Dict = pred_index.index(snake_case_ ) if eos_token in pred_index else -1
__a : Tuple = preds_max_prob[index][: pred_eos_index + 1]
__a : Any = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(snake_case_ )
conf_scores.append(snake_case_ )
return dec_strs, conf_scores
def lowerCAmelCase (self : List[Any] , snake_case_ : Any ):
__a : Any = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(snake_case_ )]
return decode_strs
def lowerCAmelCase (self : int , snake_case_ : Dict ):
return self.bpe_tokenizer.batch_decode(snake_case_ )
def lowerCAmelCase (self : List[Any] , snake_case_ : Any ):
__a : Union[str, Any] = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(snake_case_ )]
return decode_strs
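# Minimal usage sketch (illustrative; assumes the original `MgpstrProcessor`
# name and an MGP-STR model producing (char, bpe, wp) logits):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]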
| 521 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["image_processor", "tokenizer"]
_SCREAMING_SNAKE_CASE : Optional[int] = "CLIPImageProcessor"
_SCREAMING_SNAKE_CASE : str = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__(self : Tuple , snake_case_ : str=None , snake_case_ : str=None , **snake_case_ : str ):
__a : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , snake_case_ , )
__a : Optional[int] = kwargs.pop('''feature_extractor''' )
__a : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(snake_case_ , snake_case_ )
def __call__(self : List[Any] , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Dict=None , **snake_case_ : int ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__a : Optional[int] = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
__a : Optional[Any] = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
__a : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def lowerCAmelCase (self : Union[str, Any] , *snake_case_ : int , **snake_case_ : str ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Any , *snake_case_ : List[str] , **snake_case_ : Dict ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase (self : Optional[Any] ):
__a : int = self.tokenizer.model_input_names
__a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
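# Usage sketch (illustrative): text and images can be encoded in one call,
#     inputs = processor(text=["a photo of a cat"], images=image,
#                        return_tensors="pt", padding=True)
# which combines input_ids/attention_mask from the tokenizer with pixel_values
# from the image processor.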
| 521 | 1 |
'''simple docstring'''
from string import ascii_uppercase
_UpperCAmelCase : Optional[int] = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCAmelCase : Optional[int] = dict(enumerate(ascii_uppercase))
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = 0
while True:
if x == i:
__lowerCAmelCase = 0
if len(lowerCamelCase) == len(lowerCamelCase):
break
key += key[i]
i += 1
return key
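# Example: generate_key("THE GERMAN ATTACK", "SECRET") cycles the key until it
# matches the message length (17 characters), giving "SECRETSECRETSECRE".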
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = ''''''
__lowerCAmelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__lowerCAmelCase = (dicta[letter] - dicta[key_new[i]]) % 2_6
i += 1
cipher_text += dicta[x]
return cipher_text
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = ''''''
__lowerCAmelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__lowerCAmelCase = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
i += 1
or_txt += dicta[x]
return or_txt
def __magic_name__( ):
__lowerCAmelCase = '''THE GERMAN ATTACK'''
__lowerCAmelCase = '''SECRET'''
__lowerCAmelCase = generate_key(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = cipher_text(lowerCamelCase, lowerCamelCase)
print(F"""Encrypted Text = {s}""")
print(F"""Original Text = {original_text(lowerCamelCase, lowerCamelCase)}""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 709 |
'''simple docstring'''
from string import ascii_uppercase
_UpperCAmelCase : Optional[int] = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCAmelCase : Optional[int] = dict(enumerate(ascii_uppercase))
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = 0
while True:
if x == i:
__lowerCAmelCase = 0
if len(lowerCamelCase) == len(lowerCamelCase):
break
key += key[i]
i += 1
return key
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = ''''''
__lowerCAmelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__lowerCAmelCase = (dicta[letter] - dicta[key_new[i]]) % 2_6
i += 1
cipher_text += dicta[x]
return cipher_text
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = ''''''
__lowerCAmelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__lowerCAmelCase = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
i += 1
or_txt += dicta[x]
return or_txt
def __magic_name__( ):
__lowerCAmelCase = '''THE GERMAN ATTACK'''
__lowerCAmelCase = '''SECRET'''
__lowerCAmelCase = generate_key(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = cipher_text(lowerCamelCase, lowerCamelCase)
print(F"""Encrypted Text = {s}""")
print(F"""Original Text = {original_text(lowerCamelCase, lowerCamelCase)}""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 474 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
"""simple docstring"""
from collections.abc import Callable
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: float = a
_lowercase: float = b
if function(_UpperCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
_lowercase: float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
_lowercase: Union[str, Any] = mid
else:
_lowercase: Any = mid
_lowercase: List[Any] = start + (end - start) / 2.0
return mid
def _lowerCAmelCase ( x ):
"""simple docstring"""
return x**3 - 2 * x - 5
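# Note: f(x) = x**3 - 2*x - 5 changes sign on [1, 1000]; its single real root
# is near x ≈ 2.0945515, which is the value bisection converges to below.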
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 353 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase__ :Optional[Any] = datasets.utils.logging.get_logger(__name__)
class snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
_A : Tuple = None
_A : Union[str, Any] = None
class snake_case ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
_A : Optional[Any] = datasets.Audio()
_A : Optional[Any] = 'audio'
_A : str = AudioFolderConfig
_A : List[str] = 42 # definition at the bottom of the script
_A : Union[str, Any] = AudioClassification(audio_column='audio' , label_column='label' )
lowercase__ :Any = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
lowercase__ :int = AUDIO_EXTENSIONS | 702 |
"""simple docstring"""
def lowerCamelCase_ ( x ) ->float:
"""simple docstring"""
return 10 - x * x
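# Note: 10 - x*x has roots at x = ±sqrt(10) ≈ ±3.1623; both calls below bracket
# the positive root and converge to it.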
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->float:
"""simple docstring"""
if equation(UpperCAmelCase_ ) * equation(UpperCAmelCase_ ) >= 0:
raise ValueError('''Wrong space!''' )
__UpperCAmelCase : Tuple = a
while (b - a) >= 0.01:
# Find middle point
__UpperCAmelCase : List[str] = (a + b) / 2
# Check if middle point is root
if equation(UpperCAmelCase_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCAmelCase_ ) * equation(UpperCAmelCase_ ) < 0:
__UpperCAmelCase : Union[str, Any] = c
else:
__UpperCAmelCase : str = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 374 | 0 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : str = abs(__a )
_a : Any = 0
while n > 0:
res += n % 10
n //= 10
return res
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Union[str, Any] = abs(__a )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
return sum(int(__a ) for c in str(abs(__a ) ) )
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__a : Callable ,__a : int ) -> None:
_a : str = F"""{func.__name__}({value})"""
_a : List[Any] = timeit(F"""__main__.{call}""" ,setup='''import __main__''' )
print(F"""{call:56} = {func(__a )} -- {timing:.4f} seconds""" )
for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__a ,__a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : str = "ResNetConfig"
# Base docstring
UpperCAmelCase_ : Union[str, Any] = "microsoft/resnet-50"
UpperCAmelCase_ : List[Any] = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ : Optional[int] = "microsoft/resnet-50"
UpperCAmelCase_ : Tuple = "tiger cat"
UpperCAmelCase_ : List[Any] = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 3 , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" ) -> List[Any]:
super().__init__()
        _a : Dict = nn.Conv2d(
__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=kernel_size // 2 , bias=__lowerCAmelCase )
        _a : Tuple = nn.BatchNorm2d(__lowerCAmelCase )
        _a : Any = ACT2FN[activation] if activation is not None else nn.Identity()
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Optional[Any]:
_a : str = self.convolution(__lowerCAmelCase )
_a : List[Any] = self.normalization(__lowerCAmelCase )
_a : Optional[Any] = self.activation(__lowerCAmelCase )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Union[str, Any]:
super().__init__()
_a : str = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        _a : Dict = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
_a : Optional[int] = config.num_channels
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Optional[Any]:
_a : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_a : Any = self.embedder(__lowerCAmelCase )
_a : Union[str, Any] = self.pooler(__lowerCAmelCase )
return embedding
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 ) -> Any:
super().__init__()
        _a : Tuple = nn.Conv2d(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , stride=__lowerCAmelCase , bias=__lowerCAmelCase )
        _a : Optional[Any] = nn.BatchNorm2d(__lowerCAmelCase )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Optional[Any]:
_a : Any = self.convolution(__lowerCAmelCase )
_a : Dict = self.normalization(__lowerCAmelCase )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" ) -> Any:
super().__init__()
_a : Tuple = in_channels != out_channels or stride != 1
_a : Tuple = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
_a : Tuple = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , activation=__lowerCAmelCase ) , )
        _a : Any = ACT2FN[activation]
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Any:
_a : int = hidden_state
_a : List[str] = self.layer(__lowerCAmelCase )
_a : Dict = self.shortcut(__lowerCAmelCase )
hidden_state += residual
_a : Dict = self.activation(__lowerCAmelCase )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" , lowerCamelCase_ = 4 ) -> List[Any]:
super().__init__()
_a : Union[str, Any] = in_channels != out_channels or stride != 1
_a : List[Any] = out_channels // reduction
_a : Any = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
_a : List[Any] = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , )
        _a : str = ACT2FN[activation]
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Dict:
_a : Optional[int] = hidden_state
_a : str = self.layer(__lowerCAmelCase )
_a : List[str] = self.shortcut(__lowerCAmelCase )
hidden_state += residual
_a : Dict = self.activation(__lowerCAmelCase )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 , lowerCamelCase_ = 2 , ) -> Optional[Any]:
super().__init__()
_a : List[Any] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_a : int = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , activation=config.hidden_act ) , *[layer(__lowerCAmelCase , __lowerCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Any:
_a : List[Any] = input
for layer in self.layers:
_a : Union[str, Any] = layer(__lowerCAmelCase )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Tuple:
super().__init__()
_a : Dict = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_a : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__lowerCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase ) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Tuple:
_a : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : int = hidden_states + (hidden_state,)
_a : Any = stage_module(__lowerCAmelCase )
if output_hidden_states:
_a : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase , )
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : Optional[int] = ResNetConfig
__lowerCAmelCase : Any = """resnet"""
__lowerCAmelCase : Tuple = """pixel_values"""
__lowerCAmelCase : str = True
def __UpperCamelCase ( self , lowerCamelCase_ ) -> str:
        if isinstance(__lowerCAmelCase , nn.Conv2d ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(__lowerCAmelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_=False ) -> Any:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_a : Union[str, Any] = value
UpperCAmelCase_ : List[str] = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with
            a config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ : int = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , snake_case__ , )
class a ( snake_case__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> List[str]:
super().__init__(__lowerCAmelCase )
_a : Optional[Any] = config
_a : str = ResNetEmbeddings(__lowerCAmelCase )
_a : List[str] = ResNetEncoder(__lowerCAmelCase )
        _a : str = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> int:
_a : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[Any] = self.embedder(__lowerCAmelCase )
_a : int = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
_a : Tuple = encoder_outputs[0]
_a : List[str] = self.pooler(__lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case__ , )
class a ( snake_case__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> List[str]:
super().__init__(__lowerCAmelCase )
_a : Optional[int] = config.num_labels
_a : str = ResNetModel(__lowerCAmelCase )
# classification head
_a : str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCamelCase ( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Optional[int]:
_a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_a : int = self.resnet(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
_a : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
_a : int = self.classifier(__lowerCAmelCase )
_a : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : Union[str, Any] = 'single_label_classification'
else:
_a : Union[str, Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
_a : List[Any] = MSELoss()
if self.num_labels == 1:
_a : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a : Dict = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
_a : Union[str, Any] = CrossEntropyLoss()
_a : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : int = BCEWithLogitsLoss()
_a : Optional[int] = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
_a : Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , snake_case__ , )
class a ( snake_case__ , snake_case__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[Any]:
super().__init__(__lowerCAmelCase )
super()._init_backbone(__lowerCAmelCase )
_a : Dict = [config.embedding_size] + config.hidden_sizes
_a : Optional[int] = ResNetEmbeddings(__lowerCAmelCase )
_a : Optional[int] = ResNetEncoder(__lowerCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@replace_return_docstrings(output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> Any:
_a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Dict = self.embedder(__lowerCAmelCase )
_a : Any = self.encoder(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
_a : Optional[int] = outputs.hidden_states
_a : List[str] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_a : Dict = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__lowerCAmelCase , )
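# Minimal usage sketch (illustrative; uses the public transformers API that
# these classes implement):
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     predicted = model.config.id2label[logits.argmax(-1).item()]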
| 707 |
'''simple docstring'''
def UpperCAmelCase_ ( A ):
'''simple docstring'''
if len(A ) <= 1:
return [tuple(A )]
_a : str = []
def generate(A , A ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , A )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
else: # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
generate(k - 1 , A )
generate(len(A ) , A )
return res
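# Example: heaps([1, 2, 3]) returns all 3! = 6 permutations in Heap order:
#   (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)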
if __name__ == "__main__":
UpperCAmelCase_ : Any = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase_ : Tuple = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 424 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowerCamelCase_ : int = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowerCAmelCase( __lowerCamelCase ):
with open(__lowerCamelCase , 'rb' ) as f:
__a = Image.open(__lowerCamelCase )
return im.convert('RGB' )
@dataclass
class a__ :
A__ : Optional[str] = field(
default=__snake_case , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A__ : Optional[str] = field(default=__snake_case , metadata={'help': 'A folder containing the training data.'} )
A__ : Optional[str] = field(default=__snake_case , metadata={'help': 'A folder containing the validation data.'} )
A__ : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
A__ : Optional[int] = field(
default=__snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A__ : Optional[int] = field(
default=__snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def collate_fn(examples):
    # Stack per-example tensors into a single batch for the Trainer.
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    labels = torch.tensor([example['labels'] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task='image-classification',
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir, '**')
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir, '**')
        dataset = load_dataset(
            'imagefolder',
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task='image-classification',
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split)
        dataset['train'] = split['train']
        dataset['validation'] = split['test']

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
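    # A small worked example (values made up for illustration): for
    # p.predictions [[0.1, 0.9], [0.8, 0.2]] and p.label_ids [1, 0], np.argmax
    # picks classes [1, 0], so the returned accuracy is 1.0.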
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task='image-classification',
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if 'shortest_edge' in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )
    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch['pixel_values'] = [
            _train_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch['pixel_values'] = [_val_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']]
        return example_batch
    if training_args.do_train:
        if 'train' not in dataset:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            dataset['train'] = (
                dataset['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset['train'].set_transform(train_transforms)

    if training_args.do_eval:
        if 'validation' not in dataset:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            dataset['validation'] = (
                dataset['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset['validation'].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset['train'] if training_args.do_train else None,
        eval_dataset=dataset['validation'] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'image-classification',
        'dataset': data_args.dataset_name,
        'tags': ['image-classification', 'vision'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
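# Example invocation (a sketch; the dataset name and output path are placeholders,
# and `--remove_unused_columns False` keeps the raw `image` column available to
# the transforms):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-base-beans \
#       --remove_unused_columns False \
#       --do_train \
#       --do_eval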
| 559 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
        return {}
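# A hypothetical example of the returned mapping (the run id and URL are made up
# for illustration; real `html_url` values come from the GitHub API):
#
#   get_job_links(3694297536, token="ghp_...")
#   # -> {'Model tests (models/albert, single-gpu)':
#   #     'https://github.com/huggingface/transformers/actions/runs/3694297536/job/...'}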
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
        return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    # The artifact URL only answers with a redirect; fetch it without following
    # redirects to read the real download location, then download from there.
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f'{artifact_name}.zip')
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': '):]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED '):]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            f'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ' problem.'
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error occurrence across all failed tests."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path, or None for non-model tests."""
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        model = test.split('/')[2]
    else:
        model = None
    return model
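# e.g. get_model('tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_forward')
# returns 'albert'; tests outside tests/models/ return None.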
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored Markdown table."""
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f'| {count} | {error[:100]} | |'
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model error counts as a GitHub-flavored Markdown table."""
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
lowerCamelCase_ : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCamelCase_ : Any = get_job_links(args.workflow_run_id, token=args.token)
lowerCamelCase_ : Any = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase_ : int = k.find(""" / """)
lowerCamelCase_ : str = k[index + len(""" / """) :]
lowerCamelCase_ : Union[str, Any] = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCamelCase_ : Any = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCamelCase_ : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCamelCase_ : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ : Optional[int] = reduce_by_error(errors)
lowerCamelCase_ : Optional[int] = reduce_by_model(errors)
lowerCamelCase_ : Any = make_github_table(reduced_by_error)
lowerCamelCase_ : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
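# Example invocation, assuming this file is saved as get_ci_error_statistics.py
# (the run id and token below are placeholders):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_reports \
#       --token ghp_xxx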
| 559 | 1 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units via their powers of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Both branches of the original reduce to the same difference of exponents.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)
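# Worked examples (a sketch; exponents come from METRIC_CONVERSION above).
# 'kilometer' has exponent 3 and 'meter' exponent 0, so the factor is 10**(3 - 0):
#
#   length_conversion(4, 'kilometer', 'meter')  # 4 * 10**3  -> 4000.0
#   length_conversion(1, 'meter', 'km')         # 1 * 10**-3 -> 0.001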
if __name__ == "__main__":
from doctest import testmod
testmod()
| 495 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
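# A small worked example (values chosen for illustration): with pad_token_id=1,
# input_ids [[5, 6, 1]] yields attention_mask [[1, 1, 0]], while the decoder
# mask always keeps position 0 (the forced decoder start token) unmasked.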
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 495 | 1 |