from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Reads a Dataset built from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        # In streaming mode, return an IterableDataset without materializing the DataFrame.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
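# --- Added example (not from the dataset row above) ---
# A hedged usage sketch for the reader reconstructed above. It assumes a local
# SparkSession is available; the values below ("/tmp/spark_cache", the toy
# DataFrame) are illustrative placeholders, not taken from the original source.
def _example_read_spark_dataframe():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])
    # streaming=False materializes the DataFrame into an Arrow-backed Dataset.
    reader = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/spark_cache")
    return reader.read()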
def heaps(arr: list) -> list:
    """
    Iterative Heap's algorithm: return all permutations of `arr` as tuples.

    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                # The swap choice depends on the parity of i (Heap's algorithm).
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
def twos_complement(number: int) -> str:
    """
    Return the two's complement binary representation of a non-positive integer.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
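# --- Added example (not from the dataset row above) ---
# Cross-check sketch: for negative n, the string built above matches Python's
# own masked two's complement once an extra sign bit is accounted for. Purely
# illustrative; the width rule (payload bits plus one sign bit) is an assumption
# of this check, not something the original file defines.
def _check_twos_complement(n: int) -> bool:
    width = len(bin(n)[3:]) + 1  # payload bits plus one sign bit
    expected = "0b" + format(n & ((1 << width) - 1), "b") if n < 0 else "0b0"
    return twos_complement(n) == expected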
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
def prepare_config_and_inputs(self):
SCREAMING_SNAKE_CASE_: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_: Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_: Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def prepare_config_and_inputs_for_common(self):
SCREAMING_SNAKE_CASE_: List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = config_and_inputs
SCREAMING_SNAKE_CASE_: Union[str, Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = 20
SCREAMING_SNAKE_CASE_: Any = model_class_name(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.init_cache(input_ids.shape[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4")
SCREAMING_SNAKE_CASE_: List[str] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
SCREAMING_SNAKE_CASE_: int = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
SCREAMING_SNAKE_CASE_: Dict = model(
input_ids[:, -1:] , attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: Tuple = model_class_name(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
SCREAMING_SNAKE_CASE_: List[str] = model.init_cache(input_ids.shape[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
SCREAMING_SNAKE_CASE_: Any = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
SCREAMING_SNAKE_CASE_: Optional[int] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def setUp(self):
SCREAMING_SNAKE_CASE_: Tuple = FlaxGPTJModelTester(self)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left")
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: str = model.config.eos_token_id
SCREAMING_SNAKE_CASE_: int = jax.jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id).sequences
SCREAMING_SNAKE_CASE_: int = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
SCREAMING_SNAKE_CASE_: Tuple = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: int = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_: List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
SCREAMING_SNAKE_CASE_: Optional[Any] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__ , dtype=jnp.float32)
SCREAMING_SNAKE_CASE_: Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = pt_model(**lowerCAmelCase__).to_tuple()
SCREAMING_SNAKE_CASE_: List[str] = fx_model(**lowerCAmelCase__).to_tuple()
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model_class.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = fx_model_loaded(**lowerCAmelCase__).to_tuple()
self.assertEqual(
len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
SCREAMING_SNAKE_CASE_: Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_: Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: Any = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: Optional[Any] = model_class(lowerCAmelCase__ , dtype=jnp.float32)
SCREAMING_SNAKE_CASE_: int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = pt_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_: Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = 0
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = pt_model(**lowerCAmelCase__).to_tuple()
SCREAMING_SNAKE_CASE_: Optional[Any] = fx_model(**lowerCAmelCase__).to_tuple()
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = pt_model_class.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__)
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any = pt_model_loaded(**lowerCAmelCase__).to_tuple()
self.assertEqual(
len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
SCREAMING_SNAKE_CASE_: str = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCAmelCase__)
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Calculate the resonant frequency (Hz) of an LC circuit:
    f = 1 / (2 * pi * sqrt(L * C)).

    >>> resonant_frequency(inductance=10, capacitance=5)
    ('Resonant frequency', 0.022507907903927652)
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
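# --- Added example (not from the dataset row above) ---
# A hedged usage sketch for the config reconstructed above: the defaults give a
# ConvNeXt-V2 tiny-like layout. The `out_features` choice is illustrative, not
# a value from the original file.
if __name__ == "__main__":
    config = ConvNextV2Config(depths=[3, 3, 9, 3], out_features=["stage2", "stage4"])
    print(config.hidden_sizes)  # [96, 192, 384, 768]
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']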
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[str]=13 , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : int=False , __UpperCamelCase : Optional[Any]=99 , __UpperCamelCase : int=32 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : List[str]=20 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Dict=0 , ) -> str:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def _UpperCamelCase ( self : Tuple ) -> List[str]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _UpperCamelCase ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) -> str:
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ) -> List[str]:
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCamelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
_UpperCamelCase = FlaxPegasusModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _UpperCamelCase ( self : Any ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ) -> Dict:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Union[str, Any] ) -> str:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> str:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : Dict , __UpperCamelCase : str=None , **__UpperCamelCase : Dict ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = model_class(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__UpperCamelCase )
_UpperCamelCase = np.ones((1, 1) )
_UpperCamelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _UpperCamelCase ( self : str ) -> Any:
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__UpperCamelCase , return_tensors='''np''' , truncation=__UpperCamelCase , max_length=512 , padding=__UpperCamelCase )
_UpperCamelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCamelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded
| 54 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self : Dict , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=900 , __UpperCamelCase : Dict=2048 , __UpperCamelCase : Dict=6 , __UpperCamelCase : Union[str, Any]=2048 , __UpperCamelCase : str=8 , __UpperCamelCase : List[Any]=6 , __UpperCamelCase : Union[str, Any]=1024 , __UpperCamelCase : Optional[int]=8 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : Dict=256 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1.0 , __UpperCamelCase : Dict=True , __UpperCamelCase : str=False , __UpperCamelCase : List[Any]="sine" , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : int=4 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[Any]=300 , __UpperCamelCase : Any=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : List[str]=1 , __UpperCamelCase : Optional[Any]=5 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Tuple=1 , __UpperCamelCase : int=1 , __UpperCamelCase : str=5 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Tuple=0.2_5 , **__UpperCamelCase : Union[str, Any] , ) -> Optional[int]:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCamelCase = backbone_config.pop('''model_type''' )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__UpperCamelCase )
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
@classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
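# --- Added example (not from the dataset row above) ---
# Quick illustration of rewrite_dict_keys on a toy BPE vocabulary; the token
# ids are made up for demonstration.
# >>> rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
# {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}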
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
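# --- Added example (not from the dataset row above) ---
# A hedged, self-contained sketch of how `find_executable_batch_size` is meant
# to be used outside these tests: decorate the inner training function, let a
# real CUDA OOM propagate, and the decorator retries with a halved batch size.
# The training body below is a stand-in, not code from the original file.
def _example_training_entry_point():
    @find_executable_batch_size(starting_batch_size=256)
    def inner_training_loop(batch_size):
        # Build dataloaders/models at `batch_size` here; any CUDA OOM raised
        # below this point triggers a retry at batch_size // 2.
        return batch_size

    return inner_training_loop()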
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
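# --- Added example (not from the dataset row above) ---
# A hedged instantiation sketch for the config reconstructed above; the
# overridden values are illustrative, not defaults from the original file.
if __name__ == "__main__":
    cfg = UMT5Config(d_model=256, num_layers=4, num_heads=4)
    assert cfg.hidden_size == 256 and cfg.num_hidden_layers == 4
    assert cfg.dense_act_fn == "gelu_new"  # "gated-gelu" maps to the gelu_new kernel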
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
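
# For orientation: the index written above follows the standard sharded-checkpoint
# layout consumed by `from_pretrained`. A minimal sketch of its shape (all values
# illustrative, not taken from a real run):
#
#   {
#       "metadata": {"total_size": 123456789},
#       "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00129-of-00129.bin", ...}
#   }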
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 251 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
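    # Sanity check against the worked example from Project Euler 116: a row of
    # five units admits 7 tilings with red (length-2) tiles, 3 with green
    # (length-3) and 2 with blue (length-4), i.e. 12 in total.
    assert solution(5) == 7 + 3 + 2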
| 52 | 0 |
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
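
# Typical usage (sketch): run interactively and answer the prompts, or point
# the command at an explicit destination file.
#
#   accelerate config
#   accelerate config --config_file ./my_config.yaml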
| 363 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
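
# A minimal usage sketch (all values illustrative): build a composite config
# from explicit sub-configs and round-trip it to a dict.
#
#   text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
#   vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "pix2struct"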
| 61 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
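
# A minimal end-to-end sketch of the feature extractor outside the test
# harness (the input length is illustrative):
#
#   fe = WhisperFeatureExtractor()
#   waveform = np.zeros(16_000)          # one second of silence at 16 kHz
#   feats = fe(waveform, sampling_rate=16_000, return_tensors="np").input_features
#   assert feats.shape == (1, 80, 3000)  # 80 mel bins, 30 s of 10 ms frames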
| 199 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
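
# Usage sketch (`run_interactive_menu` is a hypothetical caller, not part of
# this module): the cursor stays hidden for the duration of the block and is
# restored even on error.
#
#   with hidden_cursor():
#       run_interactive_menu()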
| 199 | 1 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
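
# The expression above is the Friedmann equation for an FLRW universe, written
# in terms of the density parameters:
#
#   H(z) = H0 * sqrt(Ω_r (1+z)^4 + Ω_m (1+z)^3 + Ω_k (1+z)^2 + Ω_Λ)
#
# where Ω_k = 1 - (Ω_r + Ω_m + Ω_Λ) is the curvature term computed above.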
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 298 |
"""simple docstring"""
import functools
def min_edit_distance(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
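    # Worked example: transforming "intention" into "execution" takes five
    # unit-cost edits (the classic Levenshtein textbook case), which the
    # memoised recursion above reproduces.
    assert min_edit_distance("intention", "execution") == 5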
| 298 | 1 |
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
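    # Quick sanity checks (values verified by hand): 8 is 1000 in binary, and
    # negative inputs keep their sign in front of the "0b" prefix.
    assert decimal_to_binary(8) == "0b1000"
    assert decimal_to_binary(-8) == "-0b1000"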
| 7 |
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
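    # Example: 25 = 0b011001 and 32 = 0b100000 once zero-padded to the same
    # width, so their bitwise OR is 0b111001.
    assert binary_or(25, 32) == "0b111001"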
| 275 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
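
# Usage sketch: the defaults above reproduce the base checkpoint's shape, and
# individual fields can be overridden as keyword arguments.
#
#   config = ErnieMConfig(num_hidden_layers=6, hidden_size=384)
#   assert config.vocab_size == 250_002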
| 266 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 266 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
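
# Usage sketch: build a config with a non-timm ResNet backbone and check the
# attribute map in action (`hidden_size` is an alias for `d_model` above).
#
#   config = TableTransformerConfig(use_timm_backbone=False)
#   assert config.hidden_size == config.d_model == 256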
| 311 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 89 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 190 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
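
# For reference, the first few Proth numbers (k * 2^n + 1 with odd k < 2^n) are
# 3, 5, 9, 13, 17, 25, 33, 41, 49, ..., so for example proth(4) == 13.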
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 190 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
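
# Minimal roundtrip sketch (shapes illustrative): encode an image batch, then
# decode the quantized latents back to pixel space.
#
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   out = model(x).sample
#   assert out.shape == x.shape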
| 276 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
_a : Union[str, Any] =tensor_name.split(""".""" )
for split in splits[:-1]:
_a : Optional[Any] =getattr(_UpperCAmelCase ,_UpperCAmelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_a : Optional[int] =new_module
_a : Optional[int] =splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
_a : Optional[Any] =tensor_name in module._buffers
_a : str =getattr(_UpperCAmelCase ,_UpperCAmelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
_a : int =False
_a : Tuple =False
if is_buffer or not is_bitsandbytes_available():
_a : str =False
_a : Optional[Any] =False
else:
_a : int =hasattr(bnb.nn ,"""Params4bit""" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
_a : int =isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
_a : Any =module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_a : int =old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
_a : str =value.to("""cpu""" )
if value.dtype == torch.inta:
_a : int =version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
_a : Dict =torch.tensor(_UpperCAmelCase ,device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_UpperCAmelCase ) and fpaa_statistics is None:
_a : int =new_value.T
_a : Any =old_value.__dict__
if is_8bit:
_a : Any =bnb.nn.Int8Params(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
elif is_4bit:
_a : Union[str, Any] =bnb.nn.Params4bit(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
_a : List[Any] =new_value
if fp16_statistics is not None:
setattr(module.weight ,"""SCB""" ,fp16_statistics.to(_UpperCAmelCase ) )
else:
if value is None:
_a : str =old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
_a : Any =value.to(_UpperCAmelCase )
else:
_a : str =torch.tensor(_UpperCAmelCase ,device=_UpperCAmelCase )
if is_buffer:
_a : Optional[int] =new_value
else:
_a : Optional[Any] =nn.Parameter(_UpperCAmelCase ,requires_grad=old_value.requires_grad )
_a : Tuple =new_value
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : str=None ,_UpperCAmelCase : Union[str, Any]=False ) -> Dict:
for name, module in model.named_children():
if current_key_name is None:
_a : Optional[int] =[]
current_key_name.append(_UpperCAmelCase )
if (isinstance(_UpperCAmelCase ,nn.Linear ) or isinstance(_UpperCAmelCase ,_UpperCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(_UpperCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a , _a : int =module.weight.shape
else:
_a : List[str] =module.in_features
_a : Tuple =module.out_features
if quantization_config.quantization_method() == "llm_int8":
_a : Optional[Any] =bnb.nn.Linear8bitLt(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,has_fp16_weights=quantization_config.llm_int8_has_fp16_weight ,threshold=quantization_config.llm_int8_threshold ,)
_a : Optional[Any] =True
else:
if (
quantization_config.llm_int8_skip_modules is not None
and name in quantization_config.llm_int8_skip_modules
):
pass
else:
_a : Dict =bnb.nn.Linear4bit(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,quantization_config.bnb_4bit_compute_dtype ,compress_statistics=quantization_config.bnb_4bit_use_double_quant ,quant_type=quantization_config.bnb_4bit_quant_type ,)
_a : List[Any] =True
# Store the module class in case we need to transpose the weight later
_a : int =type(_UpperCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_UpperCAmelCase )
if len(list(module.children() ) ) > 0:
_a , _a : List[Any] =_replace_with_bnb_linear(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,has_been_replaced=_UpperCAmelCase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Any=None ) -> Tuple:
_a : Dict =["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
_a , _a : List[Any] =_replace_with_bnb_linear(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def SCREAMING_SNAKE_CASE_ ( *_UpperCAmelCase : Any ,**_UpperCAmelCase : Any ) -> str:
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" ,_UpperCAmelCase ,)
return replace_with_bnb_linear(*_UpperCAmelCase ,**_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( *_UpperCAmelCase : str ,**_UpperCAmelCase : Optional[int] ) -> Optional[int]:
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" ,_UpperCAmelCase ,)
return set_module_quantized_tensor_to_device(*_UpperCAmelCase ,**_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Union[str, Any]:
_a : Any =deepcopy(_UpperCAmelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
_a : List[Any] =find_tied_parameters(_UpperCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str =sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
_a : Optional[int] =sum(_UpperCAmelCase ,[] )
_a : List[Any] =len(_UpperCAmelCase ) > 0
# Check if it is a base model
_a : Tuple =not hasattr(_UpperCAmelCase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_a : List[Any] =list(model.named_children() )
_a : Dict =[list_modules[-1][0]]
# add last module together with tied weights
_a : List[str] =set(_UpperCAmelCase ) - set(_UpperCAmelCase )
_a : str =list(set(_UpperCAmelCase ) ) + list(_UpperCAmelCase )
# remove ".weight" from the keys
_a : List[Any] =[""".weight""", """.bias"""]
_a : Any =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_a : Any =name.replace(_UpperCAmelCase ,"""""" )
filtered_module_names.append(_UpperCAmelCase )
return filtered_module_names
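# Editor's sketch (hedged, not part of the original file): how the helpers
# above fit together. `BitsAndBytesConfig` is the real transformers config
# object whose llm_int8_* / bnb_4bit_* attributes are read here; the model id
# and device are placeholders.
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
#   )
#   # every nn.Linear except lm_head is now a bnb.nn.Linear8bitLt; its weights
#   # are then materialized with set_module_quantized_tensor_to_device(...)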
| 276 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
_A = logging.get_logger(__name__)
_A = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
@add_start_docstrings(__UpperCamelCase )
def __call__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
UpperCamelCase_ = max_length
UpperCamelCase_ = max_position_embeddings
@add_start_docstrings(__UpperCamelCase )
def __call__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = input_ids.shape[-1]
UpperCamelCase_ = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"""with `max_length = start_length + max_new_tokens` instead.""" , __UpperCamelCase , )
UpperCamelCase_ = start_length
UpperCamelCase_ = max_new_tokens
UpperCamelCase_ = start_length + max_new_tokens
@add_start_docstrings(__UpperCamelCase )
def __call__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
UpperCamelCase_ = max_time
UpperCamelCase_ = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__UpperCamelCase )
def __call__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
@add_start_docstrings(__UpperCamelCase )
def __call__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return any(criteria(__UpperCamelCase , __UpperCamelCase ) for criteria in self )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
for stopping_criterium in self:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
return stopping_criterium.max_length
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
return stopping_criterium.max_length
return None
def lowerCamelCase__ ( a__ : StoppingCriteriaList , a__ : int ) -> StoppingCriteriaList:
UpperCamelCase_ = stopping_criteria.max_length
UpperCamelCase_ = deepcopy(a__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , a__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=a__ ) )
return new_stopping_criteria
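# Editor's sketch (not from the original file): end-to-end use of the public
# stopping-criteria API that the obfuscated classes above implement; "gpt2"
# is just a small placeholder checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=64),  # stop once 64 total tokens exist
        MaxTimeCriteria(max_time=5.0),  # ...or after 5 seconds of wall-clock time
    ]
)
inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, stopping_criteria=criteria)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))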
| 261 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Optional[Any] = CpmAntTokenizer
A__ : Tuple = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase_ = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
UpperCamelCase_ = """今天天气真好!"""
UpperCamelCase_ = ["""今天""", """天气""", """真""", """好""", """!"""]
UpperCamelCase_ = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = """今天天气真好!"""
UpperCamelCase_ = [tokenizer.bos_token] + tokens
UpperCamelCase_ = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
UpperCamelCase_ = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 261 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = OmegaConf.load(UpperCamelCase_ )
snake_case = torch.load(UpperCamelCase_ ,map_location='''cpu''' )['''model''']
snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
snake_case = {}
snake_case = '''first_stage_model.'''
for key in keys:
if key.startswith(UpperCamelCase_ ):
snake_case = state_dict[key]
# extract state_dict for UNetLDM
snake_case = {}
snake_case = '''model.diffusion_model.'''
for key in keys:
if key.startswith(UpperCamelCase_ ):
snake_case = state_dict[key]
snake_case = config.model.params.first_stage_config.params
snake_case = config.model.params.unet_config.params
snake_case = VQModel(**UpperCamelCase_ ).eval()
vqvae.load_state_dict(UpperCamelCase_ )
snake_case = UNetLDMModel(**UpperCamelCase_ ).eval()
unet.load_state_dict(UpperCamelCase_ )
snake_case = DDIMScheduler(
num_train_timesteps=config.model.params.timesteps ,beta_schedule='''scaled_linear''' ,beta_start=config.model.params.linear_start ,beta_end=config.model.params.linear_end ,clip_sample=UpperCamelCase_ ,)
snake_case = LDMPipeline(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
pipeline.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
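# Editor's note: an example invocation built from the argparse options above
# (the script filename and all paths are placeholders):
#
#   python convert_ldm_original.py \
#       --checkpoint_path /path/to/ldm.ckpt \
#       --config_path /path/to/ldm_config.yaml \
#       --output_path ./converted_ldm_pipeline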
| 127 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1.0 , __snake_case = None , ):
super().__init__()
snake_case = initial_learning_rate
snake_case = warmup_steps
snake_case = power
snake_case = decay_schedule_fn
snake_case = name
def __call__( self , __snake_case ):
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
snake_case = tf.cast(__snake_case , tf.float32 )
snake_case = tf.cast(self.warmup_steps , tf.float32 )
snake_case = global_step_float / warmup_steps_float
snake_case = self.initial_learning_rate * tf.math.pow(__snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__snake_case , )
def a_ ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = 0.0 ,UpperCamelCase_ = 0.9 ,UpperCamelCase_ = 0.999 ,UpperCamelCase_ = 1e-8 ,UpperCamelCase_ = None ,UpperCamelCase_ = None ,UpperCamelCase_ = 0.0 ,UpperCamelCase_ = 1.0 ,UpperCamelCase_ = None ,):
"""simple docstring"""
snake_case = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCamelCase_ ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=UpperCamelCase_ ,)
if num_warmup_steps:
snake_case = WarmUp(
initial_learning_rate=UpperCamelCase_ ,decay_schedule_fn=UpperCamelCase_ ,warmup_steps=UpperCamelCase_ ,)
if weight_decay_rate > 0.0:
snake_case = AdamWeightDecay(
learning_rate=UpperCamelCase_ ,weight_decay_rate=UpperCamelCase_ ,beta_1=UpperCamelCase_ ,beta_2=UpperCamelCase_ ,epsilon=UpperCamelCase_ ,clipnorm=UpperCamelCase_ ,global_clipnorm=UpperCamelCase_ ,exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] ,include_in_weight_decay=UpperCamelCase_ ,)
else:
snake_case = tf.keras.optimizers.Adam(
learning_rate=UpperCamelCase_ ,beta_1=UpperCamelCase_ ,beta_2=UpperCamelCase_ ,epsilon=UpperCamelCase_ ,clipnorm=UpperCamelCase_ ,global_clipnorm=UpperCamelCase_ ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case = 0.001 , __snake_case = 0.9 , __snake_case = 0.999 , __snake_case = 1E-7 , __snake_case = False , __snake_case = 0.0 , __snake_case = None , __snake_case = None , __snake_case = "AdamWeightDecay" , **__snake_case , ):
super().__init__(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
snake_case = weight_decay_rate
snake_case = include_in_weight_decay
snake_case = exclude_from_weight_decay
@classmethod
def a_ ( cls , __snake_case ):
snake_case = {'''WarmUp''': WarmUp}
return super(__snake_case , cls ).from_config(__snake_case , custom_objects=__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
super(__snake_case , self )._prepare_local(__snake_case , __snake_case , __snake_case )
snake_case = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def a_ ( self , __snake_case , __snake_case=None , **__snake_case ):
snake_case , snake_case = list(zip(*__snake_case ) )
return super(__snake_case , self ).apply_gradients(zip(__snake_case , __snake_case ) , name=__snake_case , **__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
snake_case = apply_state or {}
snake_case = apply_state.get((var_device, var_dtype) )
if coefficients is None:
snake_case = self._fallback_apply_state(__snake_case , __snake_case )
snake_case = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def a_ ( self , __snake_case , __snake_case , __snake_case=None ):
snake_case , snake_case = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
snake_case = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_dense(__snake_case , __snake_case , **__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None ):
snake_case , snake_case = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
snake_case = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_sparse(__snake_case , __snake_case , __snake_case , **__snake_case )
def a_ ( self ):
snake_case = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def a_ ( self , __snake_case ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return False
return True
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self ):
snake_case = []
snake_case = None
@property
def a_ ( self ):
if self._accum_steps is None:
snake_case = tf.Variable(
tf.constant(0 , dtype=tf.int64 ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def a_ ( self ):
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __snake_case ):
if not self._gradients:
snake_case = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__snake_case ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__snake_case ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(__snake_case )}''' )
for accum_gradient, gradient in zip(self._gradients , __snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__snake_case )
self._accum_steps.assign_add(1 )
def a_ ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__snake_case ) )
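# Editor's sketch (hedged): the factory above is transformers' TF
# `create_optimizer` (name inferred from the original source; the definition
# above is obfuscated). A non-zero weight_decay_rate selects AdamWeightDecay.
from transformers import create_optimizer

optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=1_000,
    weight_decay_rate=0.01,
)
# `optimizer` can now be passed to Keras, e.g. model.compile(optimizer=optimizer, ...)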
| 127 | 1 |
from collections.abc import Callable
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
lowerCAmelCase__ : float = a
lowerCAmelCase__ : float = b
if function(SCREAMING_SNAKE_CASE_ ) == 0: # one of the a or b is a root for the function
return a
elif function(SCREAMING_SNAKE_CASE_ ) == 0:
return b
elif (
function(SCREAMING_SNAKE_CASE_ ) * function(SCREAMING_SNAKE_CASE_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
lowerCAmelCase__ : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # iterate until the bracket width drops below 1e-7
if function(SCREAMING_SNAKE_CASE_ ) == 0:
return mid
elif function(SCREAMING_SNAKE_CASE_ ) * function(SCREAMING_SNAKE_CASE_ ) < 0:
lowerCAmelCase__ : List[Any] = mid
else:
lowerCAmelCase__ : List[Any] = mid
lowerCAmelCase__ : List[str] = start + (end - start) / 2.0
return mid
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> float:
return x**3 - 2 * x - 5
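# Editor's worked example (hedged): f(x) = x**3 - 2*x - 5 has one real root
# near x ~ 2.0945, and f(2) = -1 < 0 while f(3) = 16 > 0, so any bracket
# containing [2, 3] works. Each step halves the bracket, so shrinking the
# initial width of 999 below 1e-7 takes about log2(999 / 1e-7) ~ 34 steps.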
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 307 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 307 | 1 |
"""simple docstring"""
import math
import random
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = False ):
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
a__ : Tuple = 0.02
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(lowerCAmelCase_ ):
# Forward propagation
__SCREAMING_SNAKE_CASE = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__SCREAMING_SNAKE_CASE = (expected / 100) - layer_a
# Error delta
__SCREAMING_SNAKE_CASE = layer_1_error * sigmoid_function(lowerCAmelCase_ , lowerCAmelCase_ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
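# Editor's note (hedged): the loop above is a single-weight gradient-descent
# step. With a = sigmoid(INITIAL_VALUE * w) and target t = expected / 100:
#     error = t - a
#     delta = error * a * (1 - a)      # sigmoid derivative at the output
#     w    += INITIAL_VALUE * delta
# i.e. it reduces the squared error 0.5 * (t - a)**2 by moving w along the
# negative of its gradient on each iteration.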
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = int(input('''Expected value: '''))
a__ : str = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 54 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
a__ : Any = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
warnings.warn(lowerCAmelCase_ , lowerCAmelCase_ )
requires_backends(lowerCAmelCase_ , "sklearn" )
return (preds == labels).mean()
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
warnings.warn(lowerCAmelCase_ , lowerCAmelCase_ )
requires_backends(lowerCAmelCase_ , "sklearn" )
__SCREAMING_SNAKE_CASE = simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = f1_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
warnings.warn(lowerCAmelCase_ , lowerCAmelCase_ )
requires_backends(lowerCAmelCase_ , "sklearn" )
__SCREAMING_SNAKE_CASE = pearsonr(lowerCAmelCase_ , lowerCAmelCase_ )[0]
__SCREAMING_SNAKE_CASE = spearmanr(lowerCAmelCase_ , lowerCAmelCase_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
warnings.warn(lowerCAmelCase_ , lowerCAmelCase_ )
requires_backends(lowerCAmelCase_ , "sklearn" )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), f"""Predictions and labels have mismatched lengths {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCAmelCase_ , lowerCAmelCase_ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCAmelCase_ , lowerCAmelCase_ )
elif task_name == "qqp":
return acc_and_fa(lowerCAmelCase_ , lowerCAmelCase_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
else:
raise KeyError(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
warnings.warn(lowerCAmelCase_ , lowerCAmelCase_ )
requires_backends(lowerCAmelCase_ , "sklearn" )
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
else:
raise KeyError(lowerCAmelCase_ )
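# Editor's sketch (hedged): the dispatcher above is `glue_compute_metrics`
# from transformers.data.metrics (name inferred; the definition above is
# obfuscated). A minimal call, assuming scikit-learn is installed:
import numpy as np
from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("mrpc", preds, labels))
# -> {'acc': 0.75, 'f1': 0.8..., 'acc_and_f1': 0.77...}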
| 54 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( __snake_case, __snake_case ):
@register_to_config
def __init__( self : List[str] , *,
UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , )-> List[Any]:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: List[Any] = nn.Parameter(torch.zeros(UpperCamelCase__))
# parameters for additional clip time embeddings
__lowerCAmelCase: Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Optional[int] = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
# parameters for encoder hidden states
__lowerCAmelCase: List[str] = clip_extra_context_tokens
__lowerCAmelCase: Optional[Any] = nn.Linear(
UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim)
__lowerCAmelCase: str = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: str = nn.LayerNorm(UpperCamelCase__)
def lowercase_ ( self : List[Any] , *, UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str])-> Optional[Any]:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowerCAmelCase: Dict = image_embeddings.shape[0]
__lowerCAmelCase: Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
__lowerCAmelCase: int = classifier_free_guidance_embeddings.expand(
UpperCamelCase__ , -1)
__lowerCAmelCase: List[str] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0)
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowerCAmelCase: List[Any] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowerCAmelCase: Dict = self.embedding_proj(UpperCamelCase__)
__lowerCAmelCase: Tuple = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__)
__lowerCAmelCase: int = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowerCAmelCase: Union[str, Any] = self.clip_extra_context_tokens_proj(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens)
__lowerCAmelCase: Dict = clip_extra_context_tokens.permute(0 , 2 , 1)
__lowerCAmelCase: Dict = self.encoder_hidden_states_proj(UpperCamelCase__)
__lowerCAmelCase: Dict = self.text_encoder_hidden_states_norm(UpperCamelCase__)
__lowerCAmelCase: Tuple = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1)
return text_encoder_hidden_states, additive_clip_time_embeddings
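# Editor's note (hedged shape walk-through of the forward pass above), with
# B = batch, D = embedding_dim, T = clip_extra_context_tokens,
# C = cross_attention_dim, L = text sequence length:
#   image_embeddings [B, D] -> time-embedding projections, added to the
#     projected prompt embeds as the additive CLIP time embedding
#   image_embeddings [B, D] -> [B, T*C] -> reshape to [B, C, T] -> permute to
#     [B, T, C]: the extra context tokens
#   text hidden states -> [B, L, C] via encoder_hidden_states_proj + LayerNorm,
#     then concatenated with the T extra tokens along dim=1 -> [B, T+L, C]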
| 108 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
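# Editor's worked example (hedged): X_L = 2 * pi * f * L, so a 35 mH inductor
# at 50 Hz has a reactance of about 2 * pi * 50 * 0.035 ~ 11 ohms. The call
# below uses the name `ind_reactance` as a stand-in for the obfuscated
# function defined above:
#
#   ind_reactance(35e-3, 50, 0)   # -> {'reactance': 10.9955...}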
| 108 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def lowercase ( _snake_case : Any , _snake_case : List[Any] ) ->int:
"""simple docstring"""
__snake_case : str = a.name
__snake_case : Any = b.name
__snake_case : int = ''''''
__snake_case : Tuple = ''''''
__snake_case : str = a == b
__snake_case : str = name_a
__snake_case : Optional[Any] = name_b
return res
def lowercase ( _snake_case : Dict , _snake_case : List[str] , _snake_case : List[Any] ) ->Optional[int]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_snake_case , _snake_case )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case )
_graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case )
def lowercase ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : List[Any] ) ->List[Any]:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(_snake_case , _snake_case , _snake_case )
def lowercase ( _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Dict ) ->int:
"""simple docstring"""
__snake_case : int = list(model.graph.initializer )
__snake_case : Union[str, Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__snake_case : List[str] = inits[i].name
__snake_case : str = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _snake_case , _snake_case )
def lowercase ( _snake_case : List[str] ) ->str:
"""simple docstring"""
__snake_case : Union[str, Any] = os.path.dirname(_snake_case )
__snake_case : Optional[Any] = os.path.basename(_snake_case )
__snake_case : Any = onnx.load(os.path.join(_snake_case , _snake_case ) )
__snake_case : str = list(model.graph.initializer )
__snake_case : Tuple = set()
__snake_case : List[str] = {}
__snake_case : Optional[int] = []
__snake_case : Optional[Any] = 0
for i in range(len(_snake_case ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_snake_case ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_snake_case )
dup_set.add(_snake_case )
__snake_case : List[Any] = inits[j].data_type
__snake_case : List[Any] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , _snake_case )
total_reduced_size += mem_size
__snake_case : Optional[Any] = inits[i].name
__snake_case : Dict = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_snake_case )
else:
__snake_case : Union[str, Any] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
__snake_case : Any = sorted(_snake_case )
_remove_dup_initializers_from_model(_snake_case , _snake_case , _snake_case )
__snake_case : List[Any] = '''optimized_''' + model_file_name
__snake_case : Any = os.path.join(_snake_case , _snake_case )
onnx.save(_snake_case , _snake_case )
return new_model
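# Editor's sketch (hedged): a typical call. The final function above scans an
# ONNX file for byte-identical initializers, rewires every consumer to the
# surviving copy, reports the memory saved, and writes "optimized_<name>"
# next to the input file. The entry-point name used below is hypothetical;
# the original identifier is obfuscated above.
#
#   remove_dup_initializers("/models/encoder.onnx")
#   # prints e.g. "total reduced size:  1.2 GB" and saves
#   # /models/optimized_encoder.onnx, returning the optimized model name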
| 102 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_UpperCamelCase = get_logger(__name__)
class _A :
_SCREAMING_SNAKE_CASE : Dict = "dummy_data"
_SCREAMING_SNAKE_CASE : int = "datasets"
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Tuple = dataset_name
__UpperCAmelCase : List[Any] = cache_dir
__UpperCAmelCase : List[Any] = use_local_dummy_data
__UpperCAmelCase : str = config
# download_callbacks take a single url as input
__UpperCAmelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__UpperCAmelCase : Tuple = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__UpperCAmelCase : Dict = str(__UpperCAmelCase )
# to be downloaded
__UpperCAmelCase : Any = None
__UpperCAmelCase : List[str] = None
@property
def __A ( self ) -> Dict:
'''simple docstring'''
if self._dummy_file is None:
__UpperCAmelCase : Dict = self.download_dummy_data()
return self._dummy_file
@property
def __A ( self ) -> Any:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Tuple = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__UpperCAmelCase : Tuple = cached_path(
__UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase )
return os.path.join(__UpperCAmelCase , self.dummy_file_name )
@property
def __A ( self ) -> Any:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
if self._bucket_url is None:
__UpperCAmelCase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __A ( self ) -> Dict:
'''simple docstring'''
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __A ( self , __UpperCAmelCase , *__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__UpperCAmelCase : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__UpperCAmelCase : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase )
elif isinstance(__UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase )
else:
return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self , __UpperCAmelCase , *__UpperCAmelCase ) -> Dict:
'''simple docstring'''
return self.download_and_extract(__UpperCAmelCase )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
return self.download_and_extract(__UpperCAmelCase )
def __A ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return path
def __A ( self ) -> List[Any]:
'''simple docstring'''
return {}
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for single_url in single_urls:
download_callback(__UpperCAmelCase )
else:
__UpperCAmelCase : Union[str, Any] = single_urls
download_callback(__UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCAmelCase : str = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls]
else:
__UpperCAmelCase : List[str] = single_urls
__UpperCAmelCase : Any = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) )
__UpperCAmelCase : Union[str, Any] = value
# make sure that values are unique
if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__UpperCAmelCase : Tuple = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__UpperCAmelCase : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __UpperCAmelCase ) ) for url in data_url )
__UpperCAmelCase : str = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__UpperCAmelCase : int = [data_url[0]] * len(__UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__UpperCAmelCase : List[str] = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__UpperCAmelCase )
return dummy_data_list
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__UpperCAmelCase : List[str] = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
def __A ( self ) -> int:
'''simple docstring'''
pass
def __A ( self , __UpperCAmelCase ) -> Any:
'''simple docstring'''
def _iter_archive_members(__UpperCAmelCase ):
# this preserves the order of the members inside the ZIP archive
__UpperCAmelCase : Dict = Path(self.dummy_file ).parent
__UpperCAmelCase : Dict = path.relative_to(__UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__UpperCAmelCase : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__UpperCAmelCase )
__UpperCAmelCase : Any = Path(__UpperCAmelCase )
__UpperCAmelCase : int = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("""rb""" )
def __A ( self , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCAmelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__UpperCAmelCase ):
if os.path.basename(__UpperCAmelCase ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ):
if os.path.basename(__UpperCAmelCase ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__UpperCAmelCase ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
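# Editor's sketch (hedged): the class above is datasets' MockDownloadManager
# (name inferred; the definition is obfuscated). In a dataset test it stands
# in for the real download manager; arguments mirror __init__ above and the
# URL is a placeholder:
#
#   dl_manager = MockDownloadManager(
#       dataset_name="squad",
#       config=None,
#       version=Version("1.0.0"),
#       cache_dir="/tmp/dummy_cache",
#       use_local_dummy_data=True,
#   )
#   archive_root = dl_manager.dummy_file  # fetches and extracts dummy_data.zip
#   paths = dl_manager.download_and_extract({"train": "https://host/train.json"})
#   # paths["train"] now points inside the dummy archive, not the real URL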
| 254 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
lowercase__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> int:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
'''simple docstring'''
return max(metric_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for gt in ground_truths )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
snake_case : Union[str, Any] = []
if args.gold_data_mode == "qa":
snake_case : List[Any] = pd.read_csv(SCREAMING_SNAKE_CASE__ , sep='''\t''' , header=SCREAMING_SNAKE_CASE__ )
for answer_list in data[1]:
snake_case : Dict = ast.literal_eval(SCREAMING_SNAKE_CASE__ )
answers.append(SCREAMING_SNAKE_CASE__ )
else:
snake_case : int = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
snake_case : Optional[Any] = [[reference] for reference in references]
snake_case : str = 0
for prediction, ground_truths in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
total += 1
em += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
fa += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case : str = 100.0 * em / total
snake_case : Optional[int] = 100.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = args.k
snake_case : Union[str, Any] = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
snake_case : Tuple = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
snake_case : List[str] = 0
for hypo, reference in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case : int = set(hypo.split('''\t''' )[:k] )
snake_case : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
snake_case : str = 100.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
'''simple docstring'''
def strip_title(SCREAMING_SNAKE_CASE__ ):
if title.startswith('''"''' ):
snake_case : Optional[int] = title[1:]
if title.endswith('''"''' ):
snake_case : Optional[Any] = title[:-1]
return title
snake_case : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )['''input_ids'''].to(args.device )
snake_case : Any = rag_model.rag.question_encoder(SCREAMING_SNAKE_CASE__ )
snake_case : List[str] = question_enc_outputs[0]
snake_case : Optional[int] = rag_model.retriever(
SCREAMING_SNAKE_CASE__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
snake_case : Optional[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
snake_case : Union[str, Any] = []
for docs in all_docs:
snake_case : Optional[Any] = [strip_title(SCREAMING_SNAKE_CASE__ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(SCREAMING_SNAKE_CASE__ ) )
return provenance_strings
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
snake_case : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )
snake_case : Tuple = inputs_dict.input_ids.to(args.device )
snake_case : List[Any] = inputs_dict.attention_mask.to(args.device )
snake_case : Optional[Any] = rag_model.generate( # rag_model overwrites generate
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=SCREAMING_SNAKE_CASE__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
snake_case : Union[str, Any] = rag_model.retriever.generator_tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
if args.print_predictions:
for q, a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.info('''Q: {} - A: {}'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
return answers
def _UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=SCREAMING_SNAKE_CASE__ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=SCREAMING_SNAKE_CASE__ , choices=['''exact''', '''compressed''', '''legacy'''] , type=SCREAMING_SNAKE_CASE__ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=SCREAMING_SNAKE_CASE__ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=SCREAMING_SNAKE_CASE__ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=SCREAMING_SNAKE_CASE__ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=SCREAMING_SNAKE_CASE__ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file. '''
'''qa - a single line in the following format: question [tab] answer_list. '''
'''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=SCREAMING_SNAKE_CASE__ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=SCREAMING_SNAKE_CASE__ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=SCREAMING_SNAKE_CASE__ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=SCREAMING_SNAKE_CASE__ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=SCREAMING_SNAKE_CASE__ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
snake_case : Union[str, Any] = parser.parse_args()
snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> Dict:
'''simple docstring'''
model_kwargs = {}
if args.model_type is None:
args.model_type = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
snake_case : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
model_kwargs['''n_docs'''] = args.n_docs
if args.index_name is not None:
model_kwargs['''index_name'''] = args.index_name
if args.index_path is not None:
model_kwargs['''index_path'''] = args.index_path
else:
snake_case : str = BartForConditionalGeneration
snake_case : List[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , SCREAMING_SNAKE_CASE__ )
snake_case : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
snake_case : Union[str, Any] = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(SCREAMING_SNAKE_CASE__ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(SCREAMING_SNAKE_CASE__ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
snake_case : Optional[int] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
snake_case : Union[str, Any] = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , retriever=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
model.retriever.init_retrieval()
else:
snake_case : Optional[int] = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
snake_case : Tuple = []
for line in tqdm(SCREAMING_SNAKE_CASE__ ):
questions.append(line.strip() )
if len(SCREAMING_SNAKE_CASE__ ) == args.eval_batch_size:
snake_case : Optional[int] = evaluate_batch_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
preds_file.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
preds_file.flush()
snake_case : Any = []
if len(SCREAMING_SNAKE_CASE__ ) > 0:
snake_case : List[str] = evaluate_batch_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
preds_file.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )
preds_file.flush()
score_fn(SCREAMING_SNAKE_CASE__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowercase__ = get_args()
main(args)
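# Example invocation (illustrative; assumes this script is saved as eval_rag.py,
# and the evaluation/gold file paths below are placeholders):
#
# python eval_rag.py \
#     --model_type rag_sequence \
#     --model_name_or_path facebook/rag-sequence-nq \
#     --evaluation_set path/to/questions.txt \
#     --gold_data_path path/to/gold.tsv \
#     --predictions_path path/to/preds.txt \
#     --eval_mode e2e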
| 83 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = StableDiffusionLDMaDPipeline
lowerCamelCase = TEXT_TO_IMAGE_PARAMS
lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
snake_case : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
snake_case : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case : int = CLIPTextModel(UpperCamelCase__ )
snake_case : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ) -> Optional[int]:
"""simple docstring"""
if str(UpperCamelCase__ ).startswith('''mps''' ):
snake_case : Optional[int] = torch.manual_seed(UpperCamelCase__ )
else:
snake_case : Optional[int] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
snake_case : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
snake_case : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : List[Any] = self.get_dummy_components()
snake_case : str = StableDiffusionLDMaDPipeline(**UpperCamelCase__ )
snake_case : Union[str, Any] = ldmad_pipe.to(UpperCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : List[str] = self.get_dummy_inputs(UpperCamelCase__ )
snake_case : Tuple = ldmad_pipe(**UpperCamelCase__ )
snake_case ,snake_case : int = output.rgb, output.depth
snake_case : str = rgb[0, -3:, -3:, -1]
snake_case : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
snake_case : int = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
snake_case : str = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : int = self.get_dummy_components()
snake_case : Any = StableDiffusionLDMaDPipeline(**UpperCamelCase__ )
snake_case : Optional[Any] = ldmad_pipe.to(UpperCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : int = self.get_dummy_inputs(UpperCamelCase__ )
inputs['''prompt'''] = 3 * [inputs['''prompt''']]
# forward
snake_case : Union[str, Any] = ldmad_pipe(**UpperCamelCase__ )
snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth
snake_case : Tuple = rgb_slice_a[0, -3:, -3:, -1]
snake_case : List[str] = depth_slice_a[0, -3:, -1]
snake_case : int = self.get_dummy_inputs(UpperCamelCase__ )
snake_case : Optional[int] = 3 * [inputs.pop('''prompt''' )]
snake_case : Dict = ldmad_pipe.tokenizer(
UpperCamelCase__ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='''pt''' , )
snake_case : Optional[Any] = text_inputs['''input_ids'''].to(UpperCamelCase__ )
snake_case : Any = ldmad_pipe.text_encoder(UpperCamelCase__ )[0]
inputs['''prompt_embeds'''] = prompt_embeds
# forward
snake_case : List[Any] = ldmad_pipe(**UpperCamelCase__ )
snake_case ,snake_case : Dict = output.rgb, output.depth
snake_case : Any = rgb_slice_b[0, -3:, -3:, -1]
snake_case : Dict = depth_slice_b[0, -3:, -1]
# the two forward passes (prompt vs. prompt_embeds) must produce matching slices
assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
snake_case : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : Dict = self.get_dummy_components()
snake_case : List[str] = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
snake_case : Optional[Any] = StableDiffusionLDMaDPipeline(**UpperCamelCase__ )
snake_case : Dict = ldmad_pipe.to(UpperCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Dict = self.get_dummy_inputs(UpperCamelCase__ )
snake_case : str = '''french fries'''
snake_case : List[str] = ldmad_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth
snake_case : Union[str, Any] = rgb[0, -3:, -3:, -1]
snake_case : int = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
snake_case : Dict = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
snake_case : Any = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]="cpu" , UpperCamelCase__ : Optional[int]=torch.floataa , UpperCamelCase__ : Union[str, Any]=0 ) -> Any:
"""simple docstring"""
snake_case : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
snake_case : Optional[Any] = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 64, 64) )
snake_case : Any = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
snake_case : List[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Dict = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
snake_case : Optional[Any] = ldmad_pipe.to(UpperCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Any = self.get_inputs(UpperCamelCase__ )
snake_case : str = ldmad_pipe(**UpperCamelCase__ )
snake_case ,snake_case : List[Any] = output.rgb, output.depth
snake_case : Optional[Any] = rgb[0, -3:, -3:, -1].flatten()
snake_case : Tuple = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
snake_case : Optional[Any] = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
snake_case : str = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any="cpu" , UpperCamelCase__ : Optional[int]=torch.floataa , UpperCamelCase__ : Optional[int]=0 ) -> str:
"""simple docstring"""
snake_case : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
snake_case : Optional[Any] = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 64, 64) )
snake_case : int = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
snake_case : List[str] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
snake_case : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(UpperCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Dict = self.get_inputs(UpperCamelCase__ )
snake_case : str = ldmad_pipe(**UpperCamelCase__ )
snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth
snake_case : Union[str, Any] = 0.495_586
snake_case : Tuple = 0.33_795_515
snake_case : Dict = 112.48_518
snake_case : Optional[int] = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
snake_case : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(UpperCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : int = self.get_inputs(UpperCamelCase__ )
snake_case : List[Any] = ldmad_pipe(**UpperCamelCase__ )
snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth
snake_case : Tuple = 0.4_194_127
snake_case : Optional[Any] = 0.35_375_586
snake_case : Any = 0.5_638_502
snake_case : int = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
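# Minimal usage sketch outside the test harness. '''Intel/ldm3d''' is the
# checkpoint already exercised by the slow tests above; the prompt and
# output handling are illustrative:
#
# pipe = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
# out = pipe('''a photograph of an astronaut riding a horse''' , output_type='''numpy''' )
# rgb, depth = out.rgb, out.depth # (1, 512, 512, 3) RGB and (1, 512, 512) depth arrays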
| 83 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowercase ( lowercase__ ):
def __init__( self , A_ , A_ ) -> List[Any]:
"""simple docstring"""
self.params = params
self.token_ids = np.array(lowercase_ )
self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A_ ) -> Optional[int]:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.lengths )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.params.max_model_input_size
UpperCamelCase = self.lengths > max_len
logger.info(F'''Splitting {sum(lowercase_ )} too long sequences.''' )
def divide_chunks(A_ , A_ ):
return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )]
UpperCamelCase = []
UpperCamelCase = []
if self.params.mlm:
UpperCamelCase = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
UpperCamelCase = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCamelCase = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCamelCase = np.insert(lowercase_ , 0 , lowercase_ )
if sub_s[-1] != sep_id:
UpperCamelCase = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ )
assert len(lowercase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowercase_ )
new_tok_ids.extend(lowercase_ )
new_lengths.extend([len(lowercase_ ) for l in sub_seqs] )
self.token_ids = np.array(lowercase_ )
self.lengths = np.array(lowercase_ )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = len(self )
UpperCamelCase = self.lengths > 11
self.token_ids = self.token_ids[indices]
self.lengths = self.lengths[indices]
UpperCamelCase = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCamelCase = self.params.special_tok_ids["unk_token"]
UpperCamelCase = len(self )
UpperCamelCase = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCamelCase = (unk_occs / self.lengths) < 0.5
self.token_ids = self.token_ids[indices]
self.lengths = self.lengths[indices]
UpperCamelCase = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = [t[0] for t in batch]
UpperCamelCase = [t[1] for t in batch]
assert len(lowercase_ ) == len(lowercase_ )
# Max for paddings
UpperCamelCase = max(lowercase_ )
# Pad token ids
if self.params.mlm:
UpperCamelCase = self.params.special_tok_ids["pad_token"]
else:
UpperCamelCase = self.params.special_tok_ids["unk_token"]
UpperCamelCase = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids]
assert len(tk_ ) == len(lowercase_ )
assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ )
UpperCamelCase = torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCamelCase = torch.tensor(lowercase_ ) # (bs)
return tk_t, lg_t
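# Illustrative wiring of the dataset above with a DataLoader, written with the
# upstream (pre-masking) names LmSeqsDataset / batch_sequences; `params` is
# assumed to be a namespace carrying the attributes the class reads
# (max_model_input_size, mlm, special_tok_ids, is_master):
#
# dataset = LmSeqsDataset(params=params, data=token_id_arrays)
# loader = torch.utils.data.DataLoader(
#     dataset, batch_size=32, collate_fn=dataset.batch_sequences
# )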
| 222 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __a ( ):
UpperCAmelCase_ : List[Any] = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
UpperCAmelCase_ : Optional[int] = Dataset.from_dict(__lowerCamelCase )
return dataset
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_dataset()
UpperCAmelCase_ : Any = make_duplicate_clusters(lowercase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = get_dataset()
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = deduplicate_dataset(lowercase_ )
self.assertEqual(len(lowercase_ ) , 2 )
print(lowercase_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowercase_ )
| 61 | 0 |
"""simple docstring"""
from collections import namedtuple
_A = namedtuple("""from_to""", """from_ to""")
_A = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 10_00),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n"""
+ """, """.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ """, """.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
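# Worked example for the converter above (masked here as `a__`): 2 litres in
# gallons is 2 * 0.001 * 264.172 ≈ 0.528, i.e. litre -> cubic metre -> gallon.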
| 166 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ):
"""simple docstring"""
UpperCAmelCase__ : Dict = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : str = scope
UpperCAmelCase__ : Optional[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : Tuple = num_patches + 2
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def _a (self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
config.num_channels = 1
UpperCAmelCase__ : List[str] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.type_sequence_label_size
UpperCAmelCase__ : List[str] = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
config.num_channels = 1
UpperCAmelCase__ : int = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DeiTModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_lowerCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a (self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : int = model(**_lowerCamelCase ).loss
loss.backward()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : Tuple = model(**_lowerCamelCase ).loss
loss.backward()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase__ : List[str] = problem_type["""title"""]
UpperCAmelCase__ : List[Any] = problem_type["""num_labels"""]
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
UpperCAmelCase__ : Any = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _a (self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def a__ ( ) -> int:
UpperCAmelCase__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a (self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**_lowerCamelCase )
# verify the logits
UpperCAmelCase__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCAmelCase__ : Dict = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : int = prepare_img()
UpperCAmelCase__ : str = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
UpperCAmelCase__ : Dict = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase__ : int = model(_lowerCamelCase )
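# Standalone inference sketch mirroring the integration tests above (the
# checkpoint name is the one the tests load; everything else is illustrative):
#
# processor = DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
# model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
# inputs = processor(images=prepare_img() , return_tensors="""pt""" )
# logits = model(**inputs ).logits # shape (1, 1000)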
| 166 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
rules_dict[steps] = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
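# Closed form implemented above for num_warmup_steps <= step <= num_training_steps:
#   lr(step) = lr_end + (lr_init - lr_end) * (1 - (step - warmup) / (total - warmup)) ** power
# LambdaLR multiplies the lambda's output by lr_init, hence the division by lr_init.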
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
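# Illustrative use of the dispatcher above (masked as `__a`; in diffusers the
# public name is `get_scheduler`) with an assumed torch optimizer:
#
# scheduler = get_scheduler(
#     SchedulerType.COSINE , optimizer , num_warmup_steps=500 , num_training_steps=10_000
# )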
| 333 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
A_ : str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
A_ : Optional[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
A_ : Union[str, Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 333 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def _snake_case( ) -> Optional[Any]:
'''simple docstring'''
A__ = 9
A__ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A__ = kruskal(lowerCamelCase__ , lowerCamelCase__ )
A__ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCamelCase__ ) == sorted(lowerCamelCase__ )
| 354 |
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ) -> int:
'''simple docstring'''
def update_area_of_max_square(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
A__ = update_area_of_max_square(SCREAMING_SNAKE_CASE__ , col + 1 )
A__ = update_area_of_max_square(row + 1 , col + 1 )
A__ = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE__ )
if mat[row][col]:
A__ = 1 + min([right, diagonal, down] )
largest_square_area[0] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__ )
return sub_problem_sol
else:
return 0
A__ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ) -> int:
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
A__ = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__ )
A__ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE__ )
A__ = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if mat[row][col]:
A__ = 1 + min([right, diagonal, down] )
largest_square_area[0] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__ )
dp_array[row][col] = sub_problem_sol
return sub_problem_sol
else:
return 0
A__ = [0]
A__ = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE__ )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE__ )
return largest_square_area[0]
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ) -> int:
'''simple docstring'''
A__ = [[0] * (cols + 1) for _ in range(rows + 1 )]
A__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
A__ = dp_array[row][col + 1]
A__ = dp_array[row + 1][col + 1]
A__ = dp_array[row + 1][col]
if mat[row][col] == 1:
dp_array[row][col] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = max(dp_array[row][col] , SCREAMING_SNAKE_CASE__ )
else:
dp_array[row][col] = 0
return largest_square_area
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ) -> int:
'''simple docstring'''
A__ = [0] * (cols + 1)
A__ = [0] * (cols + 1)
A__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
A__ = current_row[col + 1]
A__ = next_row[col + 1]
A__ = next_row[col]
if mat[row][col] == 1:
current_row[col] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = max(current_row[col] , SCREAMING_SNAKE_CASE__ )
else:
current_row[col] = 0
A__ = current_row
return largest_square_area
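# The four variants above trade memory for clarity:
#   plain recursion        exponential time, recursion depth O(rows + cols)
#   memoised recursion     O(rows * cols) time and space
#   bottom-up dp_array     O(rows * cols) time and space
#   two-row variant        O(rows * cols) time, O(cols) space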
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 282 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowerCAmelCase_ = '''__DUMMY_TRANSFORMERS_USER__'''
lowerCAmelCase_ = '''Dummy User'''
lowerCAmelCase_ = '''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'''
lowerCAmelCase_ = '''https://hub-ci.huggingface.co'''
lowerCAmelCase_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
lowerCAmelCase_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
lowerCAmelCase_ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , _UpperCamelCase )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
HfFolder.save_token(_UpperCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
return HfApi(endpoint=_UpperCamelCase )
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = HfFolder.get_token()
HfFolder.save_token(_UpperCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
def _cleanup_repo(_UpperCamelCase ):
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
@contextmanager
def _temporary_repo(_UpperCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_UpperCamelCase )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : List[Any] = f'''repo_txt_data-{int(time.time() * 10E3 )}'''
snake_case_ : Tuple = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data/text_data.txt''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Any = f'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
snake_case_ : List[Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[int] = f'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
snake_case_ : Union[str, Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 279 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : int = [[float('''inf''' ) for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
dist[i][j] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_UpperCamelCase ):
# looping through rows of graph array
for i in range(_UpperCamelCase ):
# looping through columns of graph array
for j in range(_UpperCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
dist[i][j] = dist[i][k] + dist[k][j]
_print_dist(_UpperCamelCase , _UpperCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('''Enter number of vertices: '''))
lowerCAmelCase_ = int(input('''Enter number of edges: '''))
lowerCAmelCase_ = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
graph[i][i] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowerCAmelCase_ = int(input('''Enter source:'''))
lowerCAmelCase_ = int(input('''Enter destination:'''))
lowerCAmelCase_ = float(input('''Enter weight:'''))
graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
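# Programmatic equivalent of the transcript above (3 vertices, the two edges
# entered interactively):
#
# INF = float('''inf''')
# g = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
# floyd_warshall(g, 3)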
| 279 | 1 |
"""simple docstring"""
def text_justification( word : str , max_width : int ) -> list:
    words = word.split()
    def justify( line : list , width : int , max_width : int ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " " )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
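# A short usage sketch (assuming the restored name above); it matches the
# round-robin space distribution described in the comments:
# >>> text_justification("This is an example of text justification.", 16)
# ['This    is    an', 'example  of text', 'justification.  ']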
| 126 | """simple docstring"""
def solution( n : int = 100 ) -> int:
    # closed forms: sum of squares is n(n+1)(2n+1)/6, square of the sum is (n(n+1)/2)**2
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 126 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class SCREAMING_SNAKE_CASE (BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE (BaseOutput ):
    images: np.ndarray
    nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker | 190 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE (TestCase ):
    def setUp( self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname , 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path , exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname , 'realm_block_records')
        os.makedirs(realm_block_records_path , exist_ok=True)
    def get_tokenizer( self):
        '''simple docstring'''
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
    def tearDown( self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def get_config( self):
        '''simple docstring'''
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset( self):
        '''simple docstring'''
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            })
        return dataset
    def get_dummy_block_records( self):
        '''simple docstring'''
        block_records = np.array(
            [
                B'This is the first record',
                B'This is the second record',
                B'This is the third record',
                B'This is the fourth record',
                B'This is the fifth record',
                B'This is a longer longer longer record',
            ] , dtype=object , )
        return block_records
    def get_dummy_retriever( self):
        '''simple docstring'''
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def test_retrieve( self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np')
        self.assertEqual(len(has_answers) , 2)
        self.assertEqual(len(start_pos) , 2)
        self.assertEqual(len(end_pos) , 2)
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
    def test_block_has_answer( self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np')
        self.assertEqual([False, True, True] , has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos)
    def test_save_load_pretrained( self):
        '''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
        self.assertEqual(retriever.block_records[0] , B'This is the first record')
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
            self.assertEqual(retriever.block_records[0] , B'This is the first record') | 190 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class __lowerCamelCase :
        @staticmethod
        def lowerCAmelCase_ ( *args , **kwargs ) -> Optional[int]:
            pass
def hashimage( image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def lowerCAmelCase_ ( self , model , tokenizer , processor ) -> Any:
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def lowerCAmelCase_ ( self , mask_generator , examples ) -> Dict:
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case_ = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
snake_case_ = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
snake_case_ = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCAmelCase_ ( self ) -> str:
snake_case_ = """facebook/sam-vit-huge"""
snake_case_ = pipeline("""mask-generation""" , model=a__ )
snake_case_ = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
snake_case_ = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , ) | 359 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase_ = {
'''yjernite/retribert-base-uncased''': 512,
}
lowerCamelCase_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
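    # Illustrative token-type layout produced by the method above (a sketch,
    # assuming the restored parameter names token_ids_0 / token_ids_1):
    #   pair input:  [CLS] A A [SEP] B B B [SEP]
    #   type ids:      0   0 0   0   1 1 1   1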
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files ) | 34 | 0 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort( collection , n ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection , index ):
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:str = input("""Enter integers separated by spaces: """)
SCREAMING_SNAKE_CASE__:list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
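# A short non-interactive usage sketch (assuming the restored names above):
# the sort recurses from the end of the list, bubbling each element leftwards.
#   data = [5, 3, 1, 4, 2]
#   rec_insertion_sort(data, len(data))
#   assert data == [1, 2, 3, 4, 5]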
| 261 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ) -> None:
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, oder?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
        '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
        '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
        '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
    }
    pair = F"""{src_lang}-{tgt_lang}"""
    readme = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(F"""Generating {path}""" )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
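# For example, the "wmt19-ru-en" iteration above is expected to write the card to
# <repo_root>/model_cards/facebook/wmt19-ru-en/README.md (path follows the loop's layout).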
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Any:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self ) -> List[str]:
        self.image_processor_tester = DPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> List[str]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Dict:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Union[str, Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
    def test_call_pil( self ) -> Tuple:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_numpy( self ) -> Optional[int]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_pytorch( self ) -> List[Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
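# A brief usage sketch of the processor under test (a sketch; DPTImageProcessor
# is the real transformers class, the size dict mirrors the defaults above):
#   from transformers import DPTImageProcessor
#   processor = DPTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
#   batch = processor(images=image_inputs, return_tensors="pt")  # -> batch.pixel_values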
| 307 |
import os
import string
import sys
__UpperCamelCase : List[Any] = 1 << 8
__UpperCamelCase : Union[str, Any] = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__UpperCamelCase : Optional[Any] = KEYMAP["""up"""]
__UpperCamelCase : Tuple = KEYMAP["""left"""]
if sys.platform == "win32":
__UpperCamelCase : List[Any] = []
__UpperCamelCase : int = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """simple docstring"""
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    """simple docstring"""
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 307 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( nums : list ) -> float:
    """simple docstring"""
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
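# A short usage example for the mean function above (name kept as in this file):
#   SCREAMING_SNAKE_CASE__([1, 2, 3, 4])   # -> 2.5
#   SCREAMING_SNAKE_CASE__([])             # -> raises ValueError("List is empty")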
| 354 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def SCREAMING_SNAKE_CASE_ ( self : Any):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3_6_0_1, 3_8_4))
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = torch.tensor(
            [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        model = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 345 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def a__ ( SCREAMING_SNAKE_CASE : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(SCREAMING_SNAKE_CASE )[1] in (".py", ".ipynb"):
yield os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).lstrip("./" )
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return f"""{i * " "}*""" if i else "\n##"
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(SCREAMING_SNAKE_CASE ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(SCREAMING_SNAKE_CASE )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def a__ ( SCREAMING_SNAKE_CASE : str = "." ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ""
for filepath in sorted(good_file_paths(SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = os.path.split(SCREAMING_SNAKE_CASE )
if filepath != old_path:
lowerCAmelCase : Dict = print_path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase : Union[str, Any] = f"""{filepath}/{filename}""".replace(" " , "%20" )
lowerCAmelCase : Union[str, Any] = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f"""{md_prefix(SCREAMING_SNAKE_CASE )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('''.''')
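# Illustrative output shape (paths hypothetical): for a tree containing
# sorts/quick_sort.py, the script prints markdown along the lines of
#
#   ## Sorts
#    * [Quick Sort](sorts/quick_sort.py)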
| 108 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class SCREAMING_SNAKE_CASE__ ( Trainer ):
    """simple docstring"""
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                " padding.." )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        """simple docstring"""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
    def _get_lr_scheduler( self , num_training_steps ):
        """simple docstring"""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowerCAmelCase : Dict = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowerCAmelCase , lowerCAmelCase : str = model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2]
else:
# compute label smoothed loss
lowerCAmelCase : int = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.nn.functional.log_softmax(snake_case__ , dim=-1 )
lowerCAmelCase , lowerCAmelCase : str = self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = inputs.pop("labels" )
lowerCAmelCase , lowerCAmelCase : str = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self._prepare_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowerCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase : Dict = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] )
lowerCAmelCase : Optional[Any] = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
lowerCAmelCase , lowerCAmelCase : Dict = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowerCAmelCase : int = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase : Optional[int] = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f""" padded to `max_length`={max_length}""" )
lowerCAmelCase : Optional[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowerCAmelCase : int = tensor
return padded_tensor
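# stand-alone sketch of the padding rule above (plain names assumed; in the upstream
# version the assignment before `return padded_tensor` writes into a slice,
# `padded_tensor[:, : tensor.shape[-1]] = tensor`, which the obfuscation dropped):
def _pad_tensors_to_max_len_sketch(tensor, max_length, pad_token_id):
    # allocate a (batch, max_length) tensor filled with the pad id
    padded_tensor = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    # left-align the original tokens and keep the padded tail
    padded_tensor[:, : tensor.shape[-1]] = tensor
    return padded_tensor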
| 108 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
snake_case__ : Tuple = os.path.join(git_repo_path, 'src', 'diffusers')
class A_ ( unittest.TestCase ):
def _lowerCAmelCase (self :Union[str, Any] )-> Tuple:
__A = find_backend(''' if not is_torch_available():''' )
self.assertEqual(lowercase__ , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__A = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(lowercase__ , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__A = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(lowercase__ , '''torch_and_transformers_and_onnx''' )
def _lowerCAmelCase (self :Dict )-> Optional[int]:
__A = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowercase__ )
self.assertIn('''torch_and_transformers''' , lowercase__ )
self.assertIn('''flax_and_transformers''' , lowercase__ )
self.assertIn('''torch_and_transformers_and_onnx''' , lowercase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def _lowerCAmelCase (self :Dict )-> List[Any]:
__A = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowercase__ , '''\nCONSTANT = None\n''' )
__A = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowercase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__A = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__A = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowercase__ , lowercase__ )
def _lowerCAmelCase (self :Optional[int] )-> List[Any]:
__A = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__A = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowercase__ )
| 358 |
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
# Project Euler 58: find the spiral side length at which the share of primes
# along the diagonals of an Ulam spiral first drops below `ratio`
def solution(ratio: float = 0.1) -> int:
    '''simple docstring'''
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # the three non-square corners of the ring with side length j + 2
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
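    # spot-check the primality helper against the primes below 30
    assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]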
| 250 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case_ : int = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def A__ ( UpperCAmelCase_ ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def A__ ( UpperCAmelCase_ ):
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCamelCase : List[str] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ )
| 83 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowercase__ ( lowercase ):
@require_torch
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_UpperCamelCase : Dict = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_UpperCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_UpperCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase__ )
BertModel.from_pretrained(lowerCamelCase__ )
BertTokenizer.from_pretrained(lowerCamelCase__ )
pipeline(task='fill-mask' ,model=lowerCamelCase__ )
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_UpperCamelCase : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCamelCase : str = '1'
_UpperCamelCase : Union[str, Any] = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_UpperCamelCase : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_UpperCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_UpperCamelCase : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase__ )
BertModel.from_pretrained(lowerCamelCase__ )
BertTokenizer.from_pretrained(lowerCamelCase__ )
pipeline(task='fill-mask' ,model=lowerCamelCase__ )
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_UpperCamelCase : List[Any] = self.get_env()
_UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCamelCase : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_UpperCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_UpperCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_UpperCamelCase : Optional[Any] = self.get_env()
_UpperCamelCase : int = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_UpperCamelCase : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCamelCase : Dict = '1'
_UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : int = '\nfrom transformers import pipeline\n '
_UpperCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_UpperCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_UpperCamelCase : Union[str, Any] = self.get_env()
_UpperCamelCase : List[Any] = '1'
_UpperCamelCase : Tuple = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_UpperCamelCase : int = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = '\nfrom transformers import AutoModel\n '
_UpperCamelCase : int = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Any = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_UpperCamelCase : Optional[Any] = self.get_env()
_UpperCamelCase : Optional[int] = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCamelCase : List[Any] = '1'
_UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
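# the switch exercised by these tests can be used directly; it must be set before
# `transformers` is imported:
# import os
# os.environ["TRANSFORMERS_OFFLINE"] = "1"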
| 83 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 | """simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    # parse the comma-separated matrix into a grid of ints
    grid = [[int(cell) for cell in row.split(',')] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    # dp[i][j]: minimal path sum reaching (i, j) moving only right or down
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 149 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """marian"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict=5_8_1_0_1 , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=1_0_2_4 , _lowerCAmelCase : Any=1_2 , _lowerCAmelCase : List[Any]=4_0_9_6 , _lowerCAmelCase : int=1_6 , _lowerCAmelCase : Tuple=1_2 , _lowerCAmelCase : Optional[int]=4_0_9_6 , _lowerCAmelCase : Any=1_6 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : str=True , _lowerCAmelCase : int=True , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : Any=1_0_2_4 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : Union[str, Any]=5_8_1_0_0 , _lowerCAmelCase : str=False , _lowerCAmelCase : Tuple=5_8_1_0_0 , _lowerCAmelCase : int=0 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=True , **_lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase =vocab_size
__lowercase =decoder_vocab_size or vocab_size
__lowercase =max_position_embeddings
__lowercase =d_model
__lowercase =encoder_ffn_dim
__lowercase =encoder_layers
__lowercase =encoder_attention_heads
__lowercase =decoder_ffn_dim
__lowercase =decoder_layers
__lowercase =decoder_attention_heads
__lowercase =dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =activation_function
__lowercase =init_std
__lowercase =encoder_layerdrop
__lowercase =decoder_layerdrop
__lowercase =use_cache
__lowercase =encoder_layers
__lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
class _UpperCamelCase ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
__lowercase ={0: 'batch'}
__lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
__lowercase , __lowercase =self.num_layers
for i in range(_lowerCAmelCase):
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __lowerCamelCase ( self : int):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =super().outputs
else:
__lowercase =super(_lowerCAmelCase , self).outputs
if self.use_past:
__lowercase , __lowercase =self.num_layers
for i in range(_lowerCAmelCase):
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCamelCase ( self : int , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# Generate decoder inputs
__lowercase =seq_length if not self.use_past else 1
__lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
__lowercase ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__lowercase =dict(**_lowerCAmelCase , **_lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
__lowercase =common_inputs['decoder_input_ids'].shape[1]
__lowercase , __lowercase =self.num_attention_heads
__lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase =decoder_seq_length + 3
__lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase =torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase)] , dim=1)
__lowercase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase =self.num_layers
__lowercase =min(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =max(_lowerCAmelCase , _lowerCAmelCase) - min_num_layers
__lowercase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_lowerCAmelCase):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
))
# TODO: test this.
__lowercase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)))
return common_inputs
def __lowerCamelCase ( self : int , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase =seqlen + 2
__lowercase , __lowercase =self.num_layers
__lowercase , __lowercase =self.num_attention_heads
__lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase =common_inputs['attention_mask'].dtype
__lowercase =torch.cat(
[common_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1)
__lowercase =[
(torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(_lowerCAmelCase)
]
return common_inputs
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase =tokenizer.num_special_tokens_to_add(_lowerCAmelCase)
__lowercase =compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase)
# Generate dummy inputs according to compute batch and sequence
__lowercase =[' '.join([tokenizer.unk_token]) * seq_length] * batch_size
__lowercase =dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase))
return common_inputs
def __lowerCamelCase ( self : Any , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
else:
__lowercase =self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
return common_inputs
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
else:
__lowercase =super(_lowerCAmelCase , self)._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
@property
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
return 1e-4
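# illustrative dummy-input generation with this ONNX config (a sketch; the class name
# MarianOnnxConfig is a reconstruction of the obfuscated definition above):
# from transformers import MarianMTModel, MarianTokenizer
# model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
# dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)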
| 166 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: Gaussian envelope times a sinusoidal carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
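# quick self-check of the kernel builder: an even ksize is bumped to the next odd size,
# so a requested 10x10 kernel comes back as 11x11
assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)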
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowerCamelCase = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowerCamelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowerCamelCase = out / out.max() * 255
lowerCamelCase = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 166 | 1 |
A__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following command.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 44 |
# Project Euler 114: count the ways to fill a row of `length` units with red blocks
# of length >= 3, separated by at least one black square
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 44 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_a = datasets.utils.logging.get_logger(__name__)
_a = ['names', 'prefix']
_a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_a = ['encoding_errors', 'on_bad_lines']
_a = ['date_format']
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : str = ","
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer"
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None
__UpperCAmelCase : Optional[Union[List[int], List[str]]] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None
__UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[Union[int, List[int]]] = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Union[str, List[str]]] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = "."
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = '"'
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : int = 1_0_0_0_0
__UpperCAmelCase : Optional[datasets.Features] = None
__UpperCAmelCase : Optional[str] = "strict"
__UpperCAmelCase : Literal["error", "warn", "skip"] = "error"
__UpperCAmelCase : Optional[str] = None
def _lowercase ( self : Tuple ):
if self.delimiter is not None:
__lowercase = self.delimiter
if self.column_names is not None:
__lowercase = self.column_names
@property
def _lowercase ( self : Union[str, Any] ):
__lowercase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : Tuple = CsvConfig
def _lowercase ( self : List[str] ):
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__lowercase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase__, (str, list, tuple) ):
__lowercase = data_files
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [files]
__lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )]
__lowercase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [files]
__lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) )
return splits
def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ):
if self.config.features is not None:
__lowercase = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
__lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ )
return pa_table
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ):
__lowercase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__lowercase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ):
__lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCAmelCase__ ):
__lowercase = pa.Table.from_pandas(UpperCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" )
raise
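# typical use of this builder through the public datasets API (file name and separator
# are placeholders):
# from datasets import load_dataset
# ds = load_dataset("csv", data_files="train.csv", sep=";")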
| 17 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # century years are leap only when divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
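    # spot checks against a calendar; 2000-02-29 exercises the divisible-by-400 leap rule
    assert get_week_day(2020, 10, 24) == "Saturday"
    assert get_week_day(2000, 2, 29) == "Tuesday"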
    doctest.testmod()
| 282 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase ( lowerCAmelCase__ ):
    lowerCAmelCase__ = filter(lambda p : p.requires_grad , model.parameters() )
lowerCAmelCase__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
if metric == "rouge2":
lowerCAmelCase__ = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
lowerCAmelCase__ = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
lowerCAmelCase__ = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
lowerCAmelCase__ = ModelCheckpoint(
dirpath=lowerCAmelCase__ , filename=lowerCAmelCase__ , monitor=F"""val_{metric}""" , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase__ , verbose=lowerCAmelCase__ , )
class a_ ( pl.Callback ):
'''simple docstring'''
def __snake_case ( self : Optional[Any] , lowercase__ : str , lowercase__ : int):
'''simple docstring'''
lowerCAmelCase__ = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lowercase__)
@rank_zero_only
def __snake_case ( self : Tuple , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Optional[int]=True):
'''simple docstring'''
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""")
lowerCAmelCase__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
lowerCAmelCase__ = Path(pl_module.hparams.output_dir)
if type_path == "test":
lowerCAmelCase__ = od / 'test_results.txt'
lowerCAmelCase__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCAmelCase__ = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
lowerCAmelCase__ = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowercase__)
generations_file.parent.mkdir(exist_ok=lowercase__)
with open(lowercase__ , 'a+') as writer:
for key in sorted(lowercase__):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCAmelCase__ = metrics[key]
if isinstance(lowercase__ , torch.Tensor):
lowerCAmelCase__ = val.item()
lowerCAmelCase__ = F"""{key}: {val:.6f}\n"""
writer.write(lowercase__)
if not save_generations:
return
if "preds" in metrics:
lowerCAmelCase__ = '\n'.join(metrics['preds'])
generations_file.open('w+').write(lowercase__)
@rank_zero_only
def __snake_case ( self : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Any):
'''simple docstring'''
try:
lowerCAmelCase__ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCAmelCase__ = pl_module.model.num_parameters()
lowerCAmelCase__ = count_trainable_parameters(lowercase__)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6})
@rank_zero_only
def __snake_case ( self : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any]):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(lowercase__ , lowercase__ , 'test')
@rank_zero_only
def __snake_case ( self : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Any):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
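# typical wiring of the callbacks above into a Lightning trainer (a sketch; the names
# Seq2SeqLoggingCallback, get_checkpoint_callback and get_early_stopping_callback are
# reconstructions of the obfuscated definitions in this file):
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback(output_dir, metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ]
# )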
| 366 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowerCAmelCase__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def __lowerCamelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = {}
with open(lowerCAmelCase__ , 'r' ) as file:
for line_number, line in enumerate(lowerCAmelCase__ ):
lowerCAmelCase__ = line.strip()
if line:
lowerCAmelCase__ = line.split()
lowerCAmelCase__ = line_number
lowerCAmelCase__ = words[0]
lowerCAmelCase__ = value
return result
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for attribute in key.split('.' ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCAmelCase__ = 'param'
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = hf_pointer
for attribute in hf_param_name.split('.' ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = shape_pointer.shape
# let's reduce dimension
lowerCAmelCase__ = value[0]
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCAmelCase__ = 'param'
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = '.'.join([key, hf_param_name] )
else:
lowerCAmelCase__ = key
lowerCAmelCase__ = value if 'lm_head' in full_key else value[0]
lowerCAmelCase__ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
lowerCAmelCase__ = False
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(lowerCAmelCase__ )[0].split('.' )[-2]
lowerCAmelCase__ = mapped_key.replace('*' , lowerCAmelCase__ )
if "weight_g" in name:
lowerCAmelCase__ = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ = 'weight_v'
elif "bias" in name:
lowerCAmelCase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = 'weight'
else:
lowerCAmelCase__ = None
if hf_dict is not None:
rename_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return is_used
return is_used
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase__ = True
else:
lowerCAmelCase__ = load_wavaveca_layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = full_name.split('conv_layers.' )[-1]
lowerCAmelCase__ = name.split('.' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=False ):
if config_path is not None:
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
else:
lowerCAmelCase__ = WavaVecaConfig()
if is_seq_class:
lowerCAmelCase__ = read_txt_into_dict(lowerCAmelCase__ )
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = WavaVecaForSequenceClassification(lowerCAmelCase__ )
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
feature_extractor.save_pretrained(lowerCAmelCase__ )
elif is_finetuned:
if dict_path:
lowerCAmelCase__ = Dictionary.load(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ = target_dict.pad_index
lowerCAmelCase__ = target_dict.bos_index
lowerCAmelCase__ = target_dict.eos_index
lowerCAmelCase__ = len(target_dict.symbols )
lowerCAmelCase__ = os.path.join(lowerCAmelCase__ , 'vocab.json' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
lowerCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = WavaVecaCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase__ , )
lowerCAmelCase__ = True if config.feat_extract_norm == 'layer' else False
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = WavaVecaForCTC(lowerCAmelCase__ )
else:
lowerCAmelCase__ = WavaVecaForPreTraining(lowerCAmelCase__ )
if is_finetuned or is_seq_class:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCAmelCase__ = argparse.Namespace(task='audio_pretraining' )
lowerCAmelCase__ = fairseq.tasks.setup_task(lowerCAmelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )
lowerCAmelCase__ = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
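# example invocation (script name and paths are placeholders):
# python convert_wav2vec2_checkpoint.py \
#     --checkpoint_path ./wav2vec_small.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-base-converted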
| 119 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """Salesforce/blip-image-captioning-base"""
SCREAMING_SNAKE_CASE_ = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
SCREAMING_SNAKE_CASE_ = """image_captioner"""
SCREAMING_SNAKE_CASE_ = AutoModelForVisionaSeq
SCREAMING_SNAKE_CASE_ = ["""image"""]
SCREAMING_SNAKE_CASE_ = ["""text"""]
    def __init__( self :Tuple , *args :Union[str, Any] , **kwargs :Tuple ):
        """simple docstring"""
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self :Union[str, Any] , image :"Image" ):
        """simple docstring"""
        return self.pre_processor(images=image , return_tensors='pt' )
    def forward( self :str , inputs :Any ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode( self :List[str] , outputs :str ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip() | 126 |
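# Usage sketch for the image-captioning tool defined above (hedged: requires network
# access to download the BLIP checkpoint, and the image path is hypothetical).
# PipelineTool instances are callable, so:
#
#     from PIL import Image
#     captioner = A_()
#     print(captioner(Image.open("photo.jpg")))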
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 126 | 1 |
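# Usage sketch for the pipeline re-exported above (hedged: assumes `torch`,
# `transformers` and `note_seq` are installed plus network access; the MIDI
# filename is hypothetical):
#
#     from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     processor = MidiProcessor()
#     output = pipe(processor("input.mid"))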
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config :Union[str, Any] , input_ids :Tuple , decoder_input_ids :Dict , attention_mask :Any=None , decoder_attention_mask :List[Any]=None , head_mask :Any=None , decoder_head_mask :Dict=None , cross_attn_head_mask :Union[str, Any]=None , ) -> Dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
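# Quick illustration (hedged, shapes only): any mask not passed explicitly is
# synthesized from the config, e.g.
#
#     batch = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
#     batch["attention_mask"].shape  # same shape as input_ids
#     batch["head_mask"].shape       # (config.encoder_layers, config.encoder_attention_heads)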
class MaMaaaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> Optional[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self : Any ) -> List[Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self : Optional[Any] ) -> List[str]:
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self : Dict , config : Optional[int] , inputs_dict : Optional[int] ) -> Dict:
"""simple docstring"""
A__ = MaMaaaModel(config=_lowerCamelCase ).get_decoder().to(_lowerCamelCase ).eval()
A__ = inputs_dict['''input_ids''']
A__ = inputs_dict['''attention_mask''']
A__ = inputs_dict['''head_mask''']
# first forward pass
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
A__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase )['''last_hidden_state''']
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[
'''last_hidden_state'''
]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-2 ) )
    def check_encoder_decoder_model_standalone( self : Tuple , config : str , inputs_dict : Any ) -> Optional[int]:
"""simple docstring"""
A__ = MaMaaaModel(config=_lowerCamelCase ).to(_lowerCamelCase ).eval()
A__ = model(**_lowerCamelCase )
A__ = outputs.encoder_last_hidden_state
A__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = model.get_encoder()
encoder.save_pretrained(_lowerCamelCase )
A__ = MaMaaaEncoder.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A__ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = model.get_decoder()
decoder.save_pretrained(_lowerCamelCase )
A__ = MaMaaaDecoder.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A__ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip( self : int , pipeline_test_casse_name : Dict , config_class : Dict , model_architecture : List[Any] , tokenizer_name : List[str] , processor_name : Dict ) -> str:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self : Optional[int] ) -> Any:
        """simple docstring"""
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
def a_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : Tuple ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A__ = model_class(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
A__ = model_class.from_pretrained(_lowerCamelCase , output_loading_info=_lowerCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def a_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowerCamelCase )
def a_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_lowerCamelCase )
def a_ ( self : Any ) -> Any:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
A__ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
if not self.is_encoder_decoder:
A__ = inputs['''input_ids''']
del inputs["input_ids"]
else:
A__ = inputs['''input_ids''']
A__ = inputs.get("""decoder_input_ids""" , _lowerCamelCase )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , _lowerCamelCase )
A__ = model.get_input_embeddings()
if not self.is_encoder_decoder:
A__ = wte(_lowerCamelCase )
else:
A__ = wte(_lowerCamelCase )
A__ = wte(_lowerCamelCase )
with torch.no_grad():
model(**_lowerCamelCase )[0]
def a_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
A__ = input_dict['''input_ids''']
A__ = input_ids.ne(1 ).to(_lowerCamelCase )
A__ = MaMaaaForConditionalGeneration(_lowerCamelCase ).eval().to(_lowerCamelCase )
if torch_device == "cuda":
model.half()
model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase )
model.generate(num_beams=4 , do_sample=_lowerCamelCase , early_stopping=_lowerCamelCase , num_return_sequences=3 )
def _long_tensor ( tok_lst :Optional[int] ) -> Optional[Any]:
    """simple docstring"""
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
A : Optional[int] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests (unittest.TestCase ):
'''simple docstring'''
@cached_property
def a_ ( self : List[Any] ) -> Any:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def a_ ( self : Tuple ) -> Dict:
"""simple docstring"""
A__ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCamelCase )
A__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
A__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
A__ = prepare_mam_aaa_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
with torch.no_grad():
A__ = model(**_lowerCamelCase )[0]
A__ = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
A__ = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def a_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCamelCase )
# change to intended input
A__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
A__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
A__ = prepare_mam_aaa_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
with torch.no_grad():
A__ = model(**_lowerCamelCase )[0]
A__ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
A__ = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def a_ ( self : str ) -> List[Any]:
"""simple docstring"""
A__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCamelCase )
A__ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
A__ = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
A__ = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""pt""" )
A__ = model.generate(
input_ids=dct["""input_ids"""].to(_lowerCamelCase ) , attention_mask=dct["""attention_mask"""].to(_lowerCamelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
A__ = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
A__ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
assert generated == expected_en
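# End-to-end sketch mirroring the integration test above (hedged: needs network
# access to download the 418M checkpoint):
#
#     tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#     model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     batch = tokenizer(["L'affaire NSA souligne l'absence totale de débat sur le renseignement"], return_tensors="pt")
#     generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))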
| 369 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : List[Any] ) -> Tuple:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Optional[int] ) -> str:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        """simple docstring"""
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        """simple docstring"""
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self : Any ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def a_ ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = EsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def a_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=__lowerCAmelCase )
A__ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
A__ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
A__ = create_position_ids_from_input_ids(__lowerCAmelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCAmelCase , __lowerCAmelCase ) ) )
def a_ ( self : List[Any] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=__lowerCAmelCase )
A__ = torch.empty(2 , 4 , 30 )
A__ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
A__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
A__ = embeddings.create_position_ids_from_inputs_embeds(__lowerCAmelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCAmelCase , __lowerCAmelCase ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest (TestCasePlus ):
'''simple docstring'''
@slow
def a_ ( self : int ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
A__ = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ = model(__lowerCAmelCase )[0]
A__ = 33
A__ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
A__ = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def a_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
A__ = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
A__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A__ = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
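# Quick illustration of the position-id helper exercised above (ESM uses pad_token_id=1,
# so padding_idx=1):
#
#     ids = torch.as_tensor([[12, 31, 13, 1]])
#     create_position_ids_from_input_ids(ids, padding_idx=1)
#     # -> tensor([[2, 3, 4, 1]]): real tokens count up from padding_idx + 1,
#     #    while pad positions keep padding_idx itself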
| 276 | 0 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast ( BertTokenizerFast ):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
    pass | 97 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args (unknown_args : Tuple ):
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def main ():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
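# Quick illustration of the flag parser above:
#
#     parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp/hf"])
#     # -> {"num_proc": "4", "cache_dir": "/tmp/hf"}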
| 34 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
    def test_accelerated_optimizer_pickling( self ):
        """simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
        AcceleratorState._reset_state()
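# Context note: `accelerator.prepare` wraps the optimizer in accelerate's
# AcceleratedOptimizer, so the pickle round-trip above is a cheap proxy for
# checkpointing support.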
| 356 |
"""simple docstring"""
def bfs( graph, s, t, parent ):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson( graph, source, sink ):
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
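# For the capacity matrix above (the classic CLRS example network), the maximum
# flow from node 0 to node 5 is 23, so this script prints 23.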
| 23 | 0 |
def pancake_sort (arr : list ) -> list:
    """simple docstring"""
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
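# e.g. entering "3,1,2" prints [1, 2, 3]; pancake sort only ever reverses prefixes,
# first flipping the current maximum to the front and then into its final position.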
| 51 |
from math import isclose, sqrt
def next_point( point_x , point_y , incoming_gradient ) -> tuple[float, float, float]:
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution( first_x_coord = 1.4 , first_y_coord = -9.6 ) -> int:
    num_reflections : int = 0
    point_x : float = first_x_coord
    point_y : float = first_y_coord
    gradient : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
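# The f-string prints "solution() = 354": 354 internal reflections is the published
# answer to Project Euler problem 144, which this script solves.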
| 73 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            'num_train_timesteps': 11_00,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'noise_sampler_seed': 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case , use_karras_sigmas=_snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case )
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case )
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
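# Standalone usage sketch outside the test harness (hedged; mirrors the loop used in
# the tests above):
#
#     scheduler = DPMSolverSDEScheduler(num_train_timesteps=11_00, beta_schedule="linear")
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         ...  # scale_model_input -> denoising model -> scheduler.step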
| 352 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
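# Spot check of the definitions at x = 25 (approximate on the sampled grid, since 25
# need not fall exactly on a linspace point): trimf gives young(25) = 1.0 and
# middle_aged(25) = 0.0, so union(25) = 1.0, intersection(25) = 0.0 and
# alg_sum(25) = 1 + 0 - 1 * 0 = 1.0.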
| 309 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Any:
        """simple docstring"""
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Dict:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> Tuple:
"""simple docstring"""
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ) -> int:
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
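# Standalone sketch of the same pre-processing path (hedged; uses a synthetic image):
#
#     from PIL import Image
#     processor = ViTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(Image.new("RGB", (30, 30)), return_tensors="pt").pixel_values
#     pixel_values.shape  # torch.Size([1, 3, 18, 18])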
| 321 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance: float , reactance: float , impedance: float )-> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
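# e.g. with resistance = 3 and reactance = 4 the missing quantity is the impedance,
# sqrt(3**2 + 4**2) = 5:
#
#     electrical_impedance(3, 4, 0)  # -> {'impedance': 5.0}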
| 321 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F".{module_name}", "transformers.models" )
            try:
                return getattr(module, class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers" )
    if hasattr(main_module, class_name ):
        return getattr(main_module, class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file , encoding="utf-8" ) as reader:
        return json.load(reader )
class __lowerCAmelCase :
def __init__( self: Optional[int] ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls: List[str] , pretrained_model_name_or_path: Optional[Any] , **kwargs: int ):
lowercase :Union[str, Any] = kwargs.pop("config" , _lowerCAmelCase )
lowercase :Optional[Any] = kwargs.pop("trust_remote_code" , _lowerCAmelCase )
lowercase :Any = True
lowercase , lowercase :Tuple = ImageProcessingMixin.get_image_processor_dict(_lowerCAmelCase , **_lowerCAmelCase )
lowercase :int = config_dict.get("image_processor_type" , _lowerCAmelCase )
lowercase :Union[str, Any] = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
lowercase :List[Any] = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase :Any = config_dict.pop("feature_extractor_type" , _lowerCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
lowercase :int = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
lowercase :Optional[Any] = config_dict["auto_map"]["AutoFeatureExtractor"]
lowercase :Dict = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Optional[int] = AutoConfig.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# It could be in `config.image_processor_type``
lowercase :Optional[int] = getattr(_lowerCAmelCase , "image_processor_type" , _lowerCAmelCase )
if hasattr(_lowerCAmelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
lowercase :Tuple = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
lowercase :List[str] = image_processor_class_from_name(_lowerCAmelCase )
lowercase :Optional[int] = image_processor_auto_map is not None
lowercase :List[str] = image_processor_class is not None or type(_lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING
lowercase :int = resolve_trust_remote_code(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if has_remote_code and trust_remote_code:
lowercase :str = get_class_from_dynamic_module(
_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
lowercase :Optional[Any] = kwargs.pop("code_revision" , _lowerCAmelCase )
if os.path.isdir(_lowerCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
lowercase :Tuple = IMAGE_PROCESSOR_MAPPING[type(_lowerCAmelCase )]
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
    def register( config_class: Dict , image_processor_class: Optional[Any] ):
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
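# Usage sketch for the auto class above (hedged: needs network access, and the
# checkpoint name is only illustrative):
#
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")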
| 158 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_UpperCAmelCase : Any = TypeVar("_T")
class QueueByTwoStacks ( Generic[_T]):
    def __init__( self: Union[str, Any] , iterable: Iterable[_T] | None = None ):
        self._stacka :list[_T] = list(iterable or [] )
        self._stackb :list[_T] = []

    def __len__( self: Dict ):
        return len(self._stacka ) + len(self._stackb )

    def __repr__( self: List[Any] ):
        return F"Queue({tuple(self._stackb[::-1] + self._stacka )})"

    def put( self: Union[str, Any] , item: _T ):
        self._stacka.append(item )

    def get( self: Any ):
        stacka_pop = self._stacka.pop
        stackb_append = self._stackb.append
        if not self._stackb:
            while self._stacka:
                stackb_append(stacka_pop() )
        if not self._stackb:
            raise IndexError("Queue is empty" )
        return self._stackb.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
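# --- Added usage demo (illustrative; relies on the fixed class above) ---
queue = QueueByTwoStacks([10, 20, 30])
queue.put(40)
assert queue.get() == 10  # FIFO: items leave in insertion order
assert len(queue) == 3
print(queue)  # Queue((20, 30, 40))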
| 158 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="""pt""").input_ids
        pixel_values = self.pre_processor(document, return_tensors="""pt""").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device), decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, """""")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, """""")
        sequence = re.sub(r"""<.*?>""", """""", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
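# --- Added usage sketch (illustrative; assumes PipelineTool instances are callable) ---
# from PIL import Image
#
# tool = DocumentQuestionAnsweringTool()
# document = Image.open("invoice.png")  # placeholder path
# print(tool(document, "What is the total amount?"))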
| 46 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Log a 2D tensor, one line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) until the LM score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the weights) based on the head mask and measure speedup."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
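# --- Added example invocation (illustrative; the script and data file names are placeholders) ---
# python run_prune_gpt.py \
#     --data_dir ./tokenized_ids.txt \
#     --model_name_or_path gpt2 \
#     --output_dir ./pruned_gpt2 \
#     --try_masking --masking_threshold 0.9 --masking_amount 0.1 --batch_size 4
# `--data_dir` must point at a whitespace-separated token-id file loadable by np.loadtxt.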
| 250 | 0 |
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` with a fresh random key per character."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert ``encrypt``: each plain code point is (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
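# --- Added round-trip check (illustrative; `random` is not a CSPRNG, so this is
# a correctness check rather than a security claim) ---
message = "attack at dawn"
cipher, key = Onepad.encrypt(message)
assert Onepad.decrypt(cipher, key) == message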
| 354 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
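# --- Added worked example (illustrative) ---
# Reactance of a 35 mH inductor at 1 kHz: X_L = 2*pi*f*L = 2*pi*1000*0.035 ≈ 219.91 ohms
print(ind_reactance(0.035, 1000, 0))  # {'reactance': 219.91...}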
| 310 | 0 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, """keep_order""") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
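# --- Added usage sketch (illustrative; shapes follow the Transformer-XL criterion) ---
# crit = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=128, d_proj=128,
#                                    cutoffs=[2_000, 6_000], div_val=1)
# hidden = torch.randn(8, 32, 128)            # (batch, seq_len, d_proj)
# labels = torch.randint(0, 10_000, (8, 32))
# nll = crit(hidden, labels)                  # flattened per-token NLL (labels are shifted by one)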
| 35 |
def actual_power(a: int, b: int):
    """Divide-and-conquer exponentiation; note int(b / 2) truncates toward zero."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
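# --- Added note and sketch (illustrative) ---
# `actual_power` recomputes the half-exponent twice, so it performs O(b)
# multiplications; caching the half gives the usual O(log b) version:
def fast_power(a: int, b: int):
    if b == 0:
        return 1
    half = fast_power(a, int(b / 2))
    return half * half if b % 2 == 0 else a * half * half


assert fast_power(2, 10) == 1024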
| 149 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="""MaskFormer does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="""MaskFormer is not a generative model""")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="""MaskFormer does not use token embeddings""")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size), device=torch_device),
            """mask_labels""": torch.randn((2, 10, *size), device=torch_device),
            """class_labels""": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
        expected_slice = [
            [-1.373_7124, -1.772_4937, -1.936_4233],
            [-1.597_7281, -1.986_7939, -2.152_3695],
            [-1.579_5398, -1.926_9832, -2.09_3942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
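    # --- Added inference sketch (illustrative; mirrors the checkpoints used in these tests) ---
    # from PIL import Image
    # from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
    #
    # processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    # model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
    # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    # inputs = processor(image, return_tensors="pt")
    # outputs = model(**inputs)
    # segmentation = processor.post_process_semantic_segmentation(
    #     outputs, target_sizes=[image.size[::-1]]
    # )[0]  # per-pixel class ids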
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="""pt""", )

        inputs["""pixel_values"""] = inputs["""pixel_values"""].to(torch_device)
        inputs["""mask_labels"""] = [el.to(torch_device) for el in inputs["""mask_labels"""]]
        inputs["""class_labels"""] = [el.to(torch_device) for el in inputs["""class_labels"""]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
 | 368 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this window, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
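# --- Added extra check (illustrative; see the driver below) ---
# Because the outer loop visits every alignment, the bad-character rule here
# still reports all occurrences (a full Boyer-Moore would also use the
# good-suffix rule to skip ahead):
assert BoyerMooreSearch("AABAACAADAABAABA", "AABA").bad_character_heuristic() == [0, 9, 12]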
text = """ABAABA"""
pattern = """AB"""
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("""No match found""")
else:
    print("""Pattern found in following positions: """)
    print(positions)
 | 317 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""")

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="""first prompt""", image=prompt_image, text_to_image_strength=0.7_5, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""", ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="""first prompt""", image=prompt_image, text_to_image_strength=0.7_5, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""", ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = """cyberpunk 2077"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.7_5, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""", ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="""numpy""").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
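# --- Added minimal usage sketch (illustrative; mirrors the nightly tests above) ---
# pipe = VersatileDiffusionPipeline.from_pretrained(
#     "shi-labs/versatile-diffusion", torch_dtype=torch.float16
# ).to("cuda")
# image = pipe.text_to_image("A painting of a squirrel eating a burger",
#                            num_inference_steps=25).images[0]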
| 44 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs, ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                self.dir_cache.update(
                    {
                        str(d): {"""name""": str(d), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def _open(self, path: str, mode: str = "rb", **kwargs, ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"""trust_env""": True}, ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
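    # --- Added usage sketch (illustrative; fetching `repo_info` via HfApi is an assumption) ---
    # from huggingface_hub import HfApi
    #
    # repo_info = HfApi().dataset_info("squad")   # any public dataset repo
    # fs = HfFileSystem(repo_info=repo_info)
    # print(fs.ls(""))  # top-level files, via `ls` defined just below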
    def ls(self, path: str, detail: bool = False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/"""))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/"""))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out)
 | 44 | 1 |
| 44 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
    """tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm"""] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlm"""] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
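# --- Added note (illustrative) ---
# With the _LazyModule in place, `from transformers.models.xlm import XLMModel`
# resolves the symbol on first attribute access, so importing the package does
# not pull in torch/TensorFlow until a backend-specific class is actually used.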
| 338 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("""data_bin""")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="""xmod_base""", arch="""xmod_base""", task="""multilingual_masked_lm""", data_name_or_path=str(data_dir), bpe="""sentencepiece""", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / """sentencepiece.bpe.model"""), src_dict=str(data_dir / """dict.txt"""), )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, """bottleneck""", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]

    print("""Our X-MOD config:""", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("""Dimensions of self-attention weights do not match.""")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("""Dimensions of self-attention output weights do not match.""")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("""Dimensions of intermediate weights do not match.""")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("""Dimensions of feed-forward weights do not match.""")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("""Lists of language adapters do not match.""")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
UpperCAmelCase : Dict = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase : Tuple = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase : Dict = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase : str = xmod.model.encoder.lm_head.weight
UpperCAmelCase : str = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase : Any = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
UpperCAmelCase : Optional[int] = model(_lowercase )[0]
if classification_head:
UpperCAmelCase : List[Any] = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowercase ) )
else:
UpperCAmelCase : Optional[Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCAmelCase : Dict = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
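    # Illustrative invocation (file name and paths are hypothetical, flags are the
    # ones defined above):
    #   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #       --xmod_checkpoint_path /checkpoints/xmod_base/model.pt \
    #       --pytorch_dump_folder_path ./converted-xmod-base
    # Add --classification_head when the fairseq checkpoint carries an MNLI head.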
| 338 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]

        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 101 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 119 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6a_09_e6_67,
0Xbb_67_ae_85,
0X3c_6e_f3_72,
0Xa5_4f_f5_3a,
0X51_0e_52_7f,
0X9b_05_68_8c,
0X1f_83_d9_ab,
0X5b_e0_cd_19,
]
        # Initialize round constants
        self.round_constants = [
0X42_8a_2f_98,
0X71_37_44_91,
0Xb5_c0_fb_cf,
0Xe9_b5_db_a5,
0X39_56_c2_5b,
0X59_f1_11_f1,
0X92_3f_82_a4,
0Xab_1c_5e_d5,
0Xd8_07_aa_98,
0X12_83_5b_01,
0X24_31_85_be,
0X55_0c_7d_c3,
0X72_be_5d_74,
0X80_de_b1_fe,
0X9b_dc_06_a7,
0Xc1_9b_f1_74,
0Xe4_9b_69_c1,
0Xef_be_47_86,
0X0f_c1_9d_c6,
0X24_0c_a1_cc,
0X2d_e9_2c_6f,
0X4a_74_84_aa,
0X5c_b0_a9_dc,
0X76_f9_88_da,
0X98_3e_51_52,
0Xa8_31_c6_6d,
0Xb0_03_27_c8,
0Xbf_59_7f_c7,
0Xc6_e0_0b_f3,
0Xd5_a7_91_47,
0X06_ca_63_51,
0X14_29_29_67,
0X27_b7_0a_85,
0X2e_1b_21_38,
0X4d_2c_6d_fc,
0X53_38_0d_13,
0X65_0a_73_54,
0X76_6a_0a_bb,
0X81_c2_c9_2e,
0X92_72_2c_85,
0Xa2_bf_e8_a1,
0Xa8_1a_66_4b,
0Xc2_4b_8b_70,
0Xc7_6c_51_a3,
0Xd1_92_e8_19,
0Xd6_99_06_24,
0Xf4_0e_35_85,
0X10_6a_a0_70,
0X19_a4_c1_16,
0X1e_37_6c_08,
0X27_48_77_4c,
0X34_b0_bc_b5,
0X39_1c_0c_b3,
0X4e_d8_aa_4a,
0X5b_9c_ca_4f,
0X68_2e_6f_f3,
0X74_8f_82_ee,
0X78_a5_63_6f,
0X84_c8_78_14,
0X8c_c7_02_08,
0X90_be_ff_fa,
0Xa4_50_6c_eb,
0Xbe_f9_a3_f7,
0Xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
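    # Usage sketch (assuming this file is saved as sha256_pure.py; both flags are
    # the ones defined in main() above):
    #   python sha256_pure.py --string "Hello World!! Welcome to Cryptography"
    #   python sha256_pure.py --file ./payload.bin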
| 212 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
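# Despite the name, this returns cosine *similarity*: rows of both inputs are
# L2-normalized, so each output entry is a dot product of unit vectors.
# Quick sanity check: jax_cosine_distance(jnp.eye(2), jnp.eye(2)) equals jnp.eye(2).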
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ):
        if input_shape is None:
            input_shape = (1, 2_24, 2_24, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None, ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={}, )
| 212 | 1 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the longest side is strictly shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
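# Worked examples, following directly from "longest side < sum of the rest":
#   check_polygon([6, 10, 5])  -> True   (10 < 6 + 5)
#   check_polygon([3, 7, 13])  -> False  (13 >= 3 + 7)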
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
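    # With the sets above, intersection = {"c", "d", "e"} (3 elements) and
    # union = {"a", "b", "c", "d", "e", "f", "h", "i"} (8 elements), so this prints 0.375.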
| 127 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 127 | 1 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
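# Worked examples:
#   quadratic_roots(1, -2, 1) -> (1.0, 1.0)   (discriminant 0: repeated real root)
#   quadratic_roots(1, 0, 1)  -> (1j, -1j)    (negative discriminant: complex pair)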
def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'''The solutions are: {solution_1} and {solution_2}''')
if __name__ == "__main__":
main()
| 288 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> Optional[int]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ) -> Dict:
UpperCAmelCase : Dict = tmp_path / '''cache'''
UpperCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : str = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ) -> Optional[int]:
UpperCAmelCase : Any = tmp_path / '''cache'''
UpperCAmelCase : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Any = features.copy() if features else default_expected_features
UpperCAmelCase : List[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Dict = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ) -> Tuple:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : Optional[int] = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCAmelCase : int = features.copy() if features else default_expected_features
UpperCAmelCase : Any = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> Union[str, Any]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase : Tuple = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCAmelCase : List[str] = features.copy()
UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = tmp_path / '''cache'''
UpperCAmelCase : List[str] = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> Optional[Any]:
UpperCAmelCase : Any = tmp_path / '''cache'''
UpperCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : List[Any] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Dict:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase : str = jsonl_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase : Dict = [jsonl_path]
UpperCAmelCase : int = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Optional[int] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=("train",) ) -> Union[str, Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
UpperCAmelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ) -> Any:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Optional[int] = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : Dict = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features
UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = JsonDatasetReader({'''train''': jsonl_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> Union[str, Any]:
if split:
UpperCAmelCase : Optional[int] = {split: jsonl_path}
else:
UpperCAmelCase : Any = '''train'''
UpperCAmelCase : Any = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCAmelCase : Tuple = tmp_path / '''cache'''
UpperCAmelCase : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Optional[Any] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def snake_case_ ( _lowerCAmelCase : List[str] ) -> str:
return json.load(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : Dict ) -> str:
return [json.loads(_lowerCAmelCase ) for line in buffer]
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
UpperCAmelCase : Union[str, Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[Any] ) -> List[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
UpperCAmelCase : Union[str, Any] = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def A ( self : str , __snake_case : str , __snake_case : str , __snake_case : int ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase : Any = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A ( self : Any , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase : List[str] = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def A ( self : List[Any] , __snake_case : str ) -> Dict:
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def A ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict ) -> Union[str, Any]:
UpperCAmelCase : List[str] = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}"""
UpperCAmelCase : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f:
UpperCAmelCase : str = f.read()
with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f:
UpperCAmelCase : Optional[int] = f.read()
assert exported_content == original_content
| 23 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
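    # Illustrative invocations (script name, host and key path are hypothetical;
    # the flags are exactly the ones defined below):
    #   python run_on_remote.py --user ubuntu --host 3.91.42.7 --key_path ~/.ssh/id_rsa \
    #       --example pytorch/text-generation/run_generation.py --prompt "hello"
    #   python run_on_remote.py --instance V100:1 --provider cheapest \
    #       --example pytorch/text-classification/run_glue.py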
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 365 |
'''simple docstring'''
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def is_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
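# Example: (0, 0, 0), (1, 1, 1) and (2, 2, 2) lie on one line, so
# is_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) -> True (the cross product is zero).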
| 274 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                ''' `placeholder_token` that is not already in the tokenizer.''')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token} - keep placeholder tokens independent''')
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ''' '''.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )
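# Minimal usage sketch (the model id and placeholder string are illustrative):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>").input_ids  # expands to <cat-toy>_0 ... <cat-toy>_3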
| 348 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 | 0 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def test_hf_hub_url( repo_id, path, revision ) -> None:
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
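# Illustrative check (added; not part of the original test): `quote` is what
# percent-encodes the blank in the parametrized filename, which is why the
# expected URL above wraps `path` in `quote`.
assert quote('''filename with blanks.csv''' ) == '''filename%20with%20blanks.csv'''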
| 369 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source, target ) -> bool:
    """simple docstring"""
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command( dataset_dir ) -> None:
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    readme_path = os.path.join(dataset_dir, '''README.md''' )
    assert os.path.exists(readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ), splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
], download_size=3_94_06_80, dataset_size=2_58_99_81, )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['''default'''], key ), getattr(expected_dataset_infos['''default'''], key )
        if key == "num_bytes":
            assert is_apercent_close(result, expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes )
        else:
            assert result == expected
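# Quick sanity sketch (added for illustration): is_apercent_close treats values
# within 1% of the target as equal enough.
assert is_apercent_close(100.5 , 100 )
assert not is_apercent_close(105 , 100 )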
| 100 | 0 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Dict = "autoformer"
__lowerCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "student_t" , _lowerCAmelCase = "nll" , _lowerCAmelCase = 1 , _lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7] , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 64 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 32 , _lowerCAmelCase = 32 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = True , _lowerCAmelCase=True , _lowerCAmelCase = 10 , _lowerCAmelCase = 25 , _lowerCAmelCase = 3 , **_lowerCAmelCase , ) -> int:
# time series specific configuration
_lowerCAmelCase = prediction_length
_lowerCAmelCase = context_length if context_length is not None else prediction_length
_lowerCAmelCase = distribution_output
_lowerCAmelCase = loss
_lowerCAmelCase = input_size
_lowerCAmelCase = num_time_features
_lowerCAmelCase = lags_sequence
_lowerCAmelCase = scaling
_lowerCAmelCase = num_dynamic_real_features
_lowerCAmelCase = num_static_real_features
_lowerCAmelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_lowerCAmelCase = cardinality
else:
_lowerCAmelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_lowerCAmelCase = embedding_dimension
else:
_lowerCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_lowerCAmelCase = num_parallel_samples
# Transformer architecture configuration
_lowerCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = use_cache
# Autoformer
_lowerCAmelCase = label_length
_lowerCAmelCase = moving_average
_lowerCAmelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
@property
def _snake_case ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
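    # Worked example (added): assuming the upstream defaults (input_size=1, no
    # static or dynamic real features, no time features, and embedding_dimension
    # summing to 0), _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, so the
    # model input dimension above is input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.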
| 158 |
'''simple docstring'''
def solution(SCREAMING_SNAKE_CASE_ : int = 1000 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE_ ) if e % 3 == 0 or e % 5 == 0 )
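# Worked check (added): for n = 10 the multiples of 3 or 5 below 10 are
# 3, 5, 6 and 9, so solution(10) == 3 + 5 + 6 + 9 == 23.
assert solution(10 ) == 23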
if __name__ == "__main__":
print(f'''{solution() = }''')
| 158 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = '''xmod'''
def __init__( self : Optional[Any] , _A : Union[str, Any]=30_522 , _A : List[Any]=768 , _A : Optional[Any]=12 , _A : Any=12 , _A : Tuple=3_072 , _A : Optional[int]="gelu" , _A : List[Any]=0.1 , _A : str=0.1 , _A : List[Any]=512 , _A : List[str]=2 , _A : str=0.02 , _A : Any=1E-12 , _A : Union[str, Any]=1 , _A : List[Any]=0 , _A : Dict=2 , _A : int="absolute" , _A : Dict=True , _A : int=None , _A : List[str]=False , _A : Dict=2 , _A : int=False , _A : Optional[int]=True , _A : Any=True , _A : Optional[int]=("en_XX",) , _A : Any=None , **_A : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
lowercase : Optional[Any] = vocab_size
lowercase : Union[str, Any] = hidden_size
lowercase : Optional[Any] = num_hidden_layers
lowercase : Dict = num_attention_heads
lowercase : Union[str, Any] = hidden_act
lowercase : Tuple = intermediate_size
lowercase : List[str] = hidden_dropout_prob
lowercase : Union[str, Any] = attention_probs_dropout_prob
lowercase : Dict = max_position_embeddings
lowercase : Any = type_vocab_size
lowercase : Optional[Any] = initializer_range
lowercase : str = layer_norm_eps
lowercase : Tuple = position_embedding_type
lowercase : Optional[Any] = use_cache
lowercase : int = classifier_dropout
lowercase : Optional[int] = pre_norm
lowercase : Any = adapter_reduction_factor
lowercase : Union[str, Any] = adapter_layer_norm
lowercase : Optional[int] = adapter_reuse_layer_norm
lowercase : Optional[Any] = ln_before_adapter
lowercase : Union[str, Any] = list(_A )
lowercase : List[Any] = default_language
class _A ( _lowerCamelCase ):
@property
def __a ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
| 116 | 0 |
'''simple docstring'''
from manim import *
class a ( _a ):
def __UpperCAmelCase ( self ) -> List[str]:
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.2_5 , width=0.2_5 )
_a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
_a = Text('CPU' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = Text('GPU' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = Text('Model' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
_a = []
_a = []
_a = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
_a = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ , *A_ )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = Text('Loaded Checkpoint' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
_a = []
_a = []
for i, rect in enumerate(A_ ):
_a = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
_a = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
_a = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_a = [meta_mem.copy() for i in range(6 )]
_a = [meta_mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
_a = Text('Disk' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(A_ , run_time=3 ) , Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
_a = []
for i, rect in enumerate(A_ ):
_a = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
_a = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
self.play(
FadeOut(A_ , A_ , *A_ , *A_ ) , )
self.wait()
| 168 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
        self.assertDictEqual(MockClass(a=2,b=True ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
        self.assertEqual(scaler._enabled,True )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
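# Worked check (added for illustration): DDP stores bucket_cap_mb=15 as
# bucket_bytes_cap = 15 * 1024 * 1024 bytes, so the floor division by
# (1_0_2_4 * 1_0_2_4) above recovers exactly 15.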
| 310 | 0 |
def euclidean_gcd( a , b ) -> int:
    '''simple docstring'''
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive( a , b ) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main() -> None:
'''simple docstring'''
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
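# Illustrative trace (added): euclidean_gcd(48, 18) steps through
# (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6.
assert euclidean_gcd(48 , 18 ) == 6
assert euclidean_gcd_recursive(48 , 18 ) == 6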
| 352 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : List[Any] ="""gpt_bigcode"""
lowercase : Dict =["""past_key_values"""]
lowercase : List[Any] ={
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCamelCase_=5_0257 , UpperCamelCase_=1024 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=None , UpperCamelCase_="gelu_pytorch_tanh" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.02 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=5_0256 , UpperCamelCase_=5_0256 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , **UpperCamelCase_ , ):
lowercase_ :Any = vocab_size
lowercase_ :List[str] = n_positions
lowercase_ :Union[str, Any] = n_embd
lowercase_ :Dict = n_layer
lowercase_ :Optional[int] = n_head
lowercase_ :List[str] = n_inner
lowercase_ :List[str] = activation_function
lowercase_ :Optional[int] = resid_pdrop
lowercase_ :Union[str, Any] = embd_pdrop
lowercase_ :Any = attn_pdrop
lowercase_ :Optional[Any] = layer_norm_epsilon
lowercase_ :str = initializer_range
lowercase_ :Optional[Any] = scale_attn_weights
lowercase_ :Any = use_cache
lowercase_ :Union[str, Any] = attention_softmax_in_fpaa
lowercase_ :int = scale_attention_softmax_in_fpaa
lowercase_ :Union[str, Any] = multi_query
lowercase_ :List[str] = bos_token_id
lowercase_ :Optional[int] = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
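        # Minimal usage sketch (added; illustrative only): in the upstream
        # GPTBigCodeConfig the attribute_map above aliases `hidden_size` to
        # `n_embd`, so e.g. GPTBigCodeConfig(n_embd=256).hidden_size == 256.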
| 252 | 0 |
def hamming_distance( string_a : str , string_b : str ) -> int:
    '''simple docstring'''
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(string_a , string_b ):
        if chara != charb:
            count += 1
    return count
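# Worked example (added): 'karolin' and 'kathrin' differ at positions 2, 3 and 4,
# so their Hamming distance is 3.
assert hamming_distance('''karolin''' , '''kathrin''' ) == 3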
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
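    # Usage sketch (added; illustrative, every name below is an assumption about
    # the upstream NLLB checkpoint rather than part of this module):
    # tok = NllbTokenizerFast.from_pretrained('facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn')
    # batch = tok('Hello world', text_target='Bonjour le monde')
    # set_src_lang_special_tokens / set_tgt_lang_special_tokens then decide whether
    # the language code lands in prefix_tokens or suffix_tokens (legacy_behaviour).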
| 306 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Any = mock.Mock()
UpperCamelCase_: Dict = 5_0_0
UpperCamelCase_: Any = {}
UpperCamelCase_: Tuple = HTTPError
UpperCamelCase_: List[str] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Union[str, Any] = mock.Mock()
UpperCamelCase_: Union[str, Any] = 5_0_0
UpperCamelCase_: str = {}
UpperCamelCase_: List[str] = HTTPError
UpperCamelCase_: Optional[int] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: str = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase_: Optional[int] = tempfile.mktemp()
with open(_lowerCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _lowerCamelCase )
UpperCamelCase_: Tuple = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _lowerCamelCase )
UpperCamelCase_: List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_: Any = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : Dict =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _a ( cls ):
UpperCamelCase_: Optional[int] = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: List[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: int = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Union[str, Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id='test-tokenizer' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: List[str] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Union[str, Any] = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase_: Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: Optional[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _a ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Optional[int] = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: str = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Dict = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase_: List[str] = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase_: int = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Dict = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _a ( self ):
UpperCamelCase_: Optional[int] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def _a ( self ):
UpperCamelCase_: int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def _a ( self ):
UpperCamelCase_: str = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: List[str] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def _a ( self ):
UpperCamelCase_: List[str] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def _a ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase_: Union[str, Any] = Trie()
UpperCamelCase_: Any = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(_lowerCamelCase , ['AB', 'C'] )
| 292 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =CLIPTokenizer
a : List[Any] =CLIPTokenizerFast
a : str =True
a : List[Any] ={}
a : str =False
def _a ( self ):
super().setUp()
# fmt: off
UpperCamelCase_: str = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase_: Any = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
UpperCamelCase_: Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
UpperCamelCase_: Any = {'unk_token': '<unk>'}
UpperCamelCase_: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCamelCase ) )
def _a ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self , _lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = 'lower newer'
UpperCamelCase_: Tuple = 'lower newer'
return input_text, output_text
def _a ( self ):
UpperCamelCase_: Dict = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase_: Optional[int] = 'lower newer'
UpperCamelCase_: Any = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
UpperCamelCase_: Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: str = tokens + [tokenizer.unk_token]
UpperCamelCase_: Optional[Any] = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@require_ftfy
def _a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: Optional[int] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
UpperCamelCase_: Tuple = tokenizer_s.tokenize(_lowerCamelCase )
UpperCamelCase_: Any = tokenizer_r.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase_: List[str] = 'xa\u0303y' + ' ' + 'x\xe3y'
UpperCamelCase_: List[Any] = tokenizer_s.tokenize(_lowerCamelCase )
UpperCamelCase_: int = tokenizer_r.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase_: Dict = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
            '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase_: int = tokenizer_s.tokenize(_lowerCamelCase )
UpperCamelCase_: Dict = tokenizer_r.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase_: List[str] = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase_: Optional[Any] = tokenizer_s.tokenize(_lowerCamelCase )
UpperCamelCase_: str = tokenizer_r.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase_: str = f'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase_: Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , )
UpperCamelCase_: int = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCamelCase ) + 1, len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: List[str] = f''' {text}'''
UpperCamelCase_: str = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , )
UpperCamelCase_: Dict = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ) + 1, 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
def _a ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_lowerCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def _a ( self ):
super().test_tokenization_python_rust_equals()
def _a ( self ):
# CLIP always lower cases letters
        pass
| 292 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowercase__ : Optional[int] = [0, 2_5, 5_0]
lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
lowercase__ : int = fuzz.membership.trimf(X, abca)
lowercase__ : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase__ : List[str] = np.ones(7_5)
lowercase__ : Any = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
lowercase__ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase__ : str = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
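
    # Sanity checks (a sketch added for illustration, not part of the original
    # script): fuzzy union and intersection reduce to element-wise max and min,
    # so the skfuzzy results above can be verified with plain NumPy.
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))
    assert np.allclose(complement_a, 1 - young)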
| 338 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
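

# Tiny illustration (made-up token ids, not part of the original test file):
# the masks built above are just 0/1 maps of non-padding positions.
def _attention_mask_demo():
    ids = np.array([[5, 7, 2, 1, 1]])  # pad_token_id == 1
    mask = np.where(ids != 1, 1, 0)
    assert mask.tolist() == [[1, 1, 1, 0, 0]]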
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
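

# Standalone NumPy sketch (an illustration, not the Flax code under test) of
# the property the two cache checks above assert: decoding step by step with a
# growing key/value cache must reproduce a full causal pass.
def _kv_cache_equivalence_demo():
    rng = np.random.default_rng(0)
    q, k, v = (rng.normal(size=(6, 4)) for _ in range(3))

    def attend(q_t, keys, values):
        scores = q_t @ keys.T / np.sqrt(keys.shape[-1])
        weights = np.exp(scores - scores.max())
        weights /= weights.sum()
        return weights @ values

    # full causal pass: position t attends over keys/values 0..t
    full = np.stack([attend(q[t], k[: t + 1], v[: t + 1]) for t in range(6)])

    # incremental pass: grow a cache one step at a time instead of recomputing
    cache_k = np.empty((0, 4))
    cache_v = np.empty((0, 4))
    steps = []
    for t in range(6):
        cache_k = np.vstack([cache_k, k[t : t + 1]])
        cache_v = np.vstack([cache_v, v[t : t + 1]])
        steps.append(attend(q[t], cache_k, cache_v))

    assert np.allclose(full, np.stack(steps))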
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
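

# Reference sketch of shift_tokens_right's contract as exercised by the test
# above (an inference from the assertions, not the library implementation):
# rotate the targets right by one and place the decoder start token in front,
# so exactly one trailing pad token disappears.
def _shift_tokens_right_reference(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.full_like(input_ids, pad_token_id)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted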
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text | 64 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 64 | 1 |
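
# A minimal sketch of the lazy-import pattern used above (an illustration built
# only on importlib/types; transformers' _LazyModule does more bookkeeping):
# the package module is swapped for an object that imports a submodule the
# first time one of its names is touched, keeping `import <package>` cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {v: k for k, vs in import_structure.items() for v in vs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value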
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout | 212 |
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
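

def _demo_prefix_sum() -> None:
    # Illustrative check (not part of the original module): [1, 2, 3, 4] has
    # prefix sums [1, 3, 6, 10], so the slice sum over indices 1..3 is
    # 10 - 1 = 9, and 2 + 3 == 5 is a reachable contiguous-slice sum.
    prefix_sum = PrefixSum([1, 2, 3, 4])
    assert prefix_sum.get_sum(1, 3) == 9
    assert prefix_sum.contains_sum(5)
    assert not prefix_sum.contains_sum(100)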
if __name__ == "__main__":
import doctest
doctest.testmod() | 212 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    r"""
    Constructs a CLIPSeg processor which wraps a ViT image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
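
# Usage sketch (the checkpoint name is illustrative):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")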
| 241 |
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes strictly below n (Project Euler problem 10)."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
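

# Quick sanity check (illustrative, not part of the original solution): the
# primes below 10 are 2, 3, 5 and 7, which sum to 17.
assert solution(10) == 17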
if __name__ == "__main__":
print(F"{solution() = }")
| 241 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    `vectors` should be an n x k 2-D NumPy array, where n is the number of
    vectors of dimensionality k, and `noofclusters` an integer.
    Note: this builds a TensorFlow 1.x graph (tf.Session, tf.placeholder).
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
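

if __name__ == "__main__":
    # Usage sketch (synthetic data, not part of the original module). Note that
    # the graph above uses the TensorFlow 1.x API (tf.Session, tf.placeholder),
    # so this demo assumes a TF 1.x runtime or tf.compat.v1 aliased as tf.
    demo_vectors = [array([float(i % 7), float(i % 11)]) for i in range(60)]
    demo_centroids, demo_assignments = TFKMeansCluster(demo_vectors, 3)
    print(demo_centroids)
    print(demo_assignments)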
| 127 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at board[row][column];
    only squares in rows above `row` can already hold queens.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        # record a copy, since `board` keeps mutating during backtracking
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
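

# Spot checks (illustrative, not part of the original module):
assert is_balanced("([]{})")
assert not is_balanced("([)]")
assert not is_balanced("(((")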
if __name__ == "__main__":
main()
| 237 | 1 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor: int):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
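

# The renaming trick above, shown with toy dicts (a general pattern, not tied
# to these checkpoints): Python dicts preserve insertion order, so zipping two
# state dicts' keys yields a positional old-name -> new-name mapping whenever
# both models register their parameters in the same order.
def _zip_rename_demo():
    old = {"blocks.0.w": 1, "blocks.0.b": 2}
    new_names = ["down.0.weight", "down.0.bias"]
    renamed = {new: old[k] for k, new in zip(old, new_names)}
    assert renamed == {"down.0.weight": 1, "down.0.bias": 2}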
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model.state_dict()  # mirror unet() above: the checkpoint stores a module
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 340 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
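
# How the integration test above works (illustration): it pins a small slice
# of the output tensor against hard-coded values with a loose tolerance
# (atol=1e-4), which catches weight-porting regressions without having to
# store full activations alongside the test.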
| 274 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4096,
"allenai/longformer-large-4096": 4096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
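

# Illustration (not part of the original module): the table is a bijection
# from all 256 byte values onto printable unicode code points; printable ASCII
# maps to itself, while e.g. the space byte (32) becomes "Ġ" (U+0120).
_byte_map_demo = bytes_to_unicode()
assert len(_byte_map_demo) == 256
assert _byte_map_demo[ord("A")] == "A"
assert _byte_map_demo[32] == chr(0x0120)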
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """
    Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
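
    # Worked example of the merge loop above (toy ranks, not the real
    # Longformer merge table): with bpe_ranks {("h","e"): 0, ("l","l"): 1,
    # ("he","ll"): 2}, "hello" merges as
    #   h e l l o  ->  he l l o  ->  he ll o  ->  hell o
    # so bpe("hello") would return "hell o" under that table.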
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
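
    # Layout produced above (illustration): a Longformer/RoBERTa pair is
    # encoded as  <s> A </s></s> B </s>,  i.e. cls + A + sep + sep + B + sep,
    # and a single sequence as  <s> A </s>.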
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs) | 171 |
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
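

# Worked check (illustrative, not part of the original solution): for n = 10
# the sum of squares is 385 and the square of the sum is 3025, so the
# difference is 3025 - 385 = 2640.
assert solution(10) == 2640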
if __name__ == "__main__":
print(F"""{solution() = }""") | 171 | 1 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
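# A minimal usage sketch: the auto classes above resolve the architecture from
# the checkpoint's config, so (per the base-model mapping) a BERT checkpoint
# yields a FlaxBertModel instance:
#
#     from transformers import FlaxAutoModel
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")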
| 106 |
"""simple docstring"""
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
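# Worked example: 49/98 is one of the four "digit cancelling" fractions,
# since dropping the shared digit 9 leaves 4/8 == 49/98:
#
#     >>> is_digit_cancelling(49, 98)
#     True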
| 100 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
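# Example invocation (checkpoint and dataset names are illustrative; the flag
# names follow the EvaluationArguments fields used above):
#
#     python validation_loss.py --model_ckpt codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-valid --batch_size 2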
| 350 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 13 | 0 |
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
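# Example of the key expansion performed by generate_key (computed by hand):
# the key is repeated until it matches the message length, spaces included.
#
#     >>> generate_key("THE GERMAN ATTACK", "SECRET")
#     'SECRETSECRETSECRE'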
| 290 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline() -> None:
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .'
        '<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
        ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says .'
        " Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 116 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
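# A minimal usage sketch of the helpers above (not part of the original API):
# iterate one batch of the synthetic regression data through the toy model.
if __name__ == "__main__":
    dataset = RegressionDataset(length=64, seed=42)
    loader = DataLoader(dataset, batch_size=16)
    model = RegressionModel(a=1, b=2)
    batch = next(iter(loader))
    print(model(batch["x"]).shape)  # torch.Size([16])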
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled "use_memorry_efficient_attention" key is kept for
        # backward compatibility with older configs
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
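# A minimal usage sketch (values assumed): rope_scaling is validated in
# __init__, so a well-formed dict passes and a malformed one raises ValueError.
if __name__ == "__main__":
    config = OpenLlamaConfig(
        hidden_size=512,
        num_hidden_layers=4,
        num_attention_heads=8,
        rope_scaling={"type": "linear", "factor": 2.0},
    )
    print(config.model_type)  # open-llama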
| 344 | 0 |
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 40 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 252 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 371 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
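# Example invocation (script name and file paths are placeholders):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/model.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin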
| 260 | 0 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
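# A minimal usage sketch: with a = [1, 0, 0] and b = [1, 0, 0] the difference
# equation reduces to y[n] = x[n], so samples pass through unchanged.
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    print([filt.process(s) for s in (0.0, 1.0, -0.5)])  # [0.0, 1.0, -0.5]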
| 292 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # maps inputs to the positive orthant
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
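# A minimal usage sketch (shapes assumed): project 32 hidden features to
# Student-T parameters for a batch of 8 series and draw one sample each.
if __name__ == "__main__":
    student_t = StudentTOutput(dim=1)
    projection = student_t.get_parameter_projection(in_features=32)
    distr_args = projection(torch.randn(8, 32))
    distribution = student_t.distribution(distr_args)
    print(distribution.sample().shape)  # torch.Size([8])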
| 292 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=["https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve"],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
        return out
| 234 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualising the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 234 | 1 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
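# Quick sanity checks (computed by hand): two 2-ohm resistors in parallel give
# 1 ohm, and 2 + 3 ohms in series give 5 ohms.
#
#     >>> resistor_parallel([2.0, 2.0])
#     1.0
#     >>> resistor_series([2.0, 3.0])
#     5.0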
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly." )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type") )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
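    # --- illustrative usage (added; checkpoint name and regex list are examples, not
    # prescribed by this file):
    #   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
    #   text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
    # decode() returns the completion cut at the second top-level `print`/`def` and at
    # the earliest match of any pattern in `truncate_before_pattern`, whichever is first.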
| 64 | 1 |
"""simple docstring"""
def is_subset_sum(arr: list, required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``.

    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 9)
    True
    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
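# --- optional sketch (added; not in the original file): the same decision problem in
# O(required_sum) space, sweeping sums right-to-left so each element is used at most once.
def _is_subset_sum_1d(arr: list, required_sum: int) -> bool:
    reachable = [True] + [False] * required_sum
    for value in arr:
        for j in range(required_sum, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    return reachable[required_sum]
assert _is_subset_sum_1d([3, 34, 4, 12, 5, 2], 9) is True
assert _is_subset_sum_1d([3, 34, 4, 12, 5, 2], 30) is False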
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text``, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
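# --- illustrative sanity check (added; not part of the original file): the O(1)
# rolling-hash update used above must agree with rehashing the window from scratch.
def _rolling_hash_matches_rehash() -> bool:
    """Hypothetical helper: slide a length-2 window from "ab" to "bc" both ways."""
    h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
    h_bc = (ord("b") * alphabet_size + ord("c")) % modulus
    # drop 'a' (weighted by alphabet_size ** (window_len - 1)), shift, append 'c'
    updated = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    return updated == h_bc
assert _rolling_hash_matches_rehash()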
def test_rabin_karp() -> None:
    """Exercise rabin_karp on a handful of match / no-match cases."""
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
    test_rabin_karp()
| 53 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self : str ):
torch.manual_seed(0 )
lowerCAmelCase_ : int = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def lowerCamelCase ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 241 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 241 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
if attention_mask is None:
__snake_case = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__snake_case = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__snake_case = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
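# --- illustrative note (added; not part of the original test file): shift_tokens_right,
# exercised below, prepends the decoder start token and drops the last position, e.g.
# with pad_token_id=1 and decoder_start_token_id=2: [[71, 82, 2]] -> [[2, 71, 82]].
# The head-mask arrays built above are not part of the dict returned here; the flax
# models under test consume only the four keys listed.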
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 238 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
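    # --- usage note (added for illustration): callers override individual defaults via
    # kwargs, e.g. self.get_scheduler_config(algorithm_type="dpmsolver", solver_order=3)
    # returns the dict above with those two entries replaced.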
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)
    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3
    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
| 238 | 1 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""
class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""
class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""
class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
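# --- minimal runnable sketch (added; not part of the original module): the same
# 1 MiB-chunk sha256 loop applied to an in-memory stream instead of a file path.
def _checksum_demo() -> None:
    """Hypothetical helper illustrating the chunked-hash pattern used above."""
    import io
    m = sha256()
    stream = io.BytesIO(b"hello")
    for chunk in iter(lambda: stream.read(1 << 20), b""):
        m.update(chunk)
    assert m.hexdigest() == "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
_checksum_demo()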
def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 237 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"), os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"), )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", F"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"] )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 237 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {F"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
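# Editor's note -- a minimal, self-contained sketch of the fairseq/spm id alignment
# documented in __init__ above. No sentencepiece model is needed: `piece_to_id` is a
# hypothetical stand-in for sp_model.PieceToId, and its return values here are assumed.
if __name__ == "__main__":
    fairseq_offset = 1
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

    def to_fairseq_id(token, piece_to_id):
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = piece_to_id(token)
        return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

    # spm places the first "real" piece (",") at id 3; fairseq expects it at id 4
    assert to_fairseq_id(",", lambda t: 3) == 4
    assert to_fairseq_id("<pad>", lambda t: 0) == 1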
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MgpstrTokenizer
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : List[str] = False
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case : List[str] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case : Union[str, Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : int = "tester"
snake_case : List[str] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
snake_case : List[Any] = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case : str = tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , 1 )
snake_case : List[Any] = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
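# Editor's note -- a hedged, self-contained sketch of what the character-level vocab
# written in setUp() above implies for encoding (indices follow the order of that list):
if __name__ == "__main__":
    vocab = ["[GO]", "[s]"] + [str(d) for d in range(10)] + [chr(c) for c in range(ord("a"), ord("z") + 1)]
    ids = {ch: i for i, ch in enumerate(vocab)}
    # "tester" is tokenized character by character, so its ids come straight from the vocab:
    assert [ids[c] for c in "tester"] == [31, 16, 30, 31, 16, 29]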
| 112 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        # Note: `is_decoder=False` is an editorial assumption; the obfuscated source only
        # shows that *some* value was passed for `is_decoder`.
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = model_class(_lowerCamelCase )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase__ : Dict = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
@slow
def _a (self ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = ASTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_audio():
    # Note: "spectogram" is the actual (misspelled) repo id on the Hub.
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a (self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.default_feature_extractor
UpperCAmelCase__ : Union[str, Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_lowerCamelCase )
UpperCAmelCase__ : str = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = prepare_audio()
UpperCAmelCase__ : Tuple = audio.squeeze().numpy()
UpperCAmelCase__ : int = feature_extractor(_lowerCamelCase , sampling_rate=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**_lowerCamelCase )
# verify the logits
UpperCAmelCase__ : Tuple = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
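# Editor's note -- a quick arithmetic check of the sequence length derived in
# ASTModelTester.__init__ above, using its defaults (patch_size=2, max_length=24,
# num_mel_bins=16, frequency_stride=2, time_stride=2):
if __name__ == "__main__":
    frequency_out = (16 - 2) // 2 + 1  # 8 patches along the mel axis
    time_out = (24 - 2) // 2 + 1       # 12 patches along the time axis
    assert frequency_out * time_out + 2 == 98  # +2 for the [CLS] and distillation tokens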
| 171 |
"""simple docstring"""
def is_sum_subset(arr, required_sum) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
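    # Editor's note -- illustrative checks for the O(n * required_sum) DP above
    # (the example values are not from the original file):
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True   # 4 + 5 == 9
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False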
| 171 | 1 |
def lowerCamelCase__ ( a__ : str , a__ : str = " " ) -> list:
UpperCamelCase_ = []
UpperCamelCase_ = 0
for index, char in enumerate(a__ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCamelCase_ = index + 1
elif index + 1 == len(a__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
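    # Editor's note -- example calls (illustrative values):
    assert split("apple#banana#cherry#orange", separator="#") == ["apple", "banana", "cherry", "orange"]
    assert split("Hello there") == ["Hello", "there"]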
| 261 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked ring of fixed capacity."""

    def __init__(self, initial_capacity=6):
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity):
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self):
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data):
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self):
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self):
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self):
        self.data = None
        self.next = None
        self.prev = None
if __name__ == "__main__":
import doctest
doctest.testmod()
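    # Editor's note -- a small usage sketch of the queue restored above:
    q = CircularQueueLinkedList(initial_capacity=3)
    q.enqueue("a")
    q.enqueue("b")
    assert q.first() == "a"
    assert q.dequeue() == "a"
    assert q.dequeue() == "b"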
| 261 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")

def serve_command_factory(args: Namespace):
    """Factory: build a pipeline from the CLI args and wrap it in a ServeCommand."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints.")
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                "Please install transformers with [serving]: pip install \"transformers[serving]\"."
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
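# Editor's note -- hedged usage sketch (the endpoint paths and JSON field names follow
# the Body(..., embed=True) parameters defined above; shown as comments only since it
# needs a running server):
#   transformers-cli serve --task text-classification --host localhost --port 8888 --workers 1
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'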
| 65 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 13 | 0 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path):
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """Grow the lexicon in place, widening every code when `index` reaches a power of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits):
    """Compress the bit string with an LZ-style growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path, compressed):
    """Prepend a self-delimiting header encoding the original file length."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path, to_write):
    """Pack the bit string into bytes (with a padding marker) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path, destination_path):
    """Full pipeline: read bits, compress, add the length header, write bytes."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
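# Editor's note -- usage sketch for the pipeline above (file names are placeholders):
#   python this_module.py <input_file> <compressed_output_file>
# This module only compresses; round-tripping requires the matching decompressor.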
| 356 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
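# Editor's note -- hedged composition sketch using the classmethod defined above
# (the model ids are illustrative examples, and this assumes transformers is installed):
#   from transformers import AutoConfig, RagConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)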
| 75 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Pick a random element of `lst` to use as the pivot."""
    return choice(lst)


def kth_number(lst, k):
    """Return the k-th smallest element (1-indexed) via quickselect.

    Assumes the elements of `lst` are distinct: the strict partitions below
    drop any duplicates equal to the pivot.
    """
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
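    # Editor's note -- illustrative checks (distinct elements, per the docstring caveat):
    assert kth_number([2, 1, 3, 4, 5], 1) == 1
    assert kth_number([2, 1, 3, 4, 5], 3) == 3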
| 167 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfaa):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
A_ : Dict = list(a_ )
if len(a_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
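# Editor's note -- a tiny check of the conv-kernel transposition rule used above:
# a Flax kernel laid out (H, W, C_in, C_out) becomes a PyTorch weight (C_out, C_in, H, W).
if __name__ == "__main__":
    kernel = np.zeros((3, 3, 8, 16))
    assert np.transpose(kernel, (3, 2, 0, 1)).shape == (16, 8, 3, 3)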
| 344 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase__ :
@staticmethod
def UpperCamelCase_ ( *lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ):
'''simple docstring'''
pass
def hashimage(image):
    # Shorten a mask to a stable 10-character fingerprint for test comparison.
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase ):
lowercase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowercase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Any = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
_UpperCamelCase : Tuple = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = 'facebook/sam-vit-huge'
_UpperCamelCase : Dict = pipeline('mask-generation' ,model=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
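# Editor's note -- hedged usage sketch mirroring the slow tests above (kept as comments
# because it downloads the heavyweight SAM checkpoint):
#   from transformers import pipeline
#   segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
#   outputs = segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
#   outputs["masks"], outputs["scores"]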
| 236 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr, low, mid, high):
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size):
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes():
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
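    # Editor's note -- the classic CLRS worked example: the maximum subarray of this
    # array is arr[3:7] == [4, -1, 2, 1], i.e. indices (3, 6) with sum 6.
    example = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    assert max_subarray(example, 0, len(example) - 1) == (3, 6, 6)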
| 236 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_snake_case = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(PATH_TO_TRANSFORMERS)
_snake_case = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_snake_case = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
_lowerCAmelCase : Any = True
# Deal with multi-line cases
elif (
re.search(
rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _lowerCamelCase , )
is not None
):
_lowerCAmelCase : Union[str, Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_lowerCAmelCase : Optional[int] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_lowerCAmelCase : Tuple = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
_lowerCAmelCase : List[str] = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
_lowerCAmelCase : Dict = True
if not attribute_used:
_lowerCAmelCase : List[str] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_lowerCAmelCase : Tuple = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_lowerCAmelCase : Any = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_lowerCAmelCase : Dict = True
elif attribute.endswith("_token_id" ):
_lowerCAmelCase : Any = True
# configuration class specific cases
if not case_allowed:
_lowerCAmelCase : Optional[int] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
_lowerCAmelCase : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = dict(inspect.signature(config_class.__init__ ).parameters )
_lowerCAmelCase : List[Any] = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
_lowerCAmelCase : List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_lowerCAmelCase : Optional[Any] = {}
if len(config_class.attribute_map ) > 0:
_lowerCAmelCase : Any = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_lowerCAmelCase : Optional[Any] = inspect.getsourcefile(_lowerCamelCase )
_lowerCAmelCase : str = os.path.dirname(_lowerCamelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_lowerCAmelCase : Any = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for fn in os.listdir(_lowerCamelCase ) if fn.startswith("modeling_" )]
# Get the source code strings
_lowerCAmelCase : Union[str, Any] = []
for path in modeling_paths:
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as fp:
modeling_sources.append(fp.read() )
_lowerCAmelCase : List[str] = []
for config_param, default_value in zip(_lowerCamelCase , _lowerCamelCase ):
# `attributes` here is all the variant names for `config_param`
_lowerCAmelCase : int = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
unused_attributes.append(attributes[0] )
return sorted(_lowerCamelCase )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_lowerCAmelCase : Tuple = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _lowerCamelCase : inspect.isclass(_lowerCamelCase )
and issubclass(_lowerCamelCase , _lowerCamelCase )
and inspect.getmodule(_lowerCamelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
_lowerCAmelCase : Optional[int] = check_config_attributes_being_used(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Dict = unused_attributes
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Dict = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(_lowerCamelCase )
if __name__ == "__main__":
check_config_attributes()
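# Illustrative sketch (not part of the script above): the attribute check boils
# down to scanning modeling source text for uses of `config.<attribute>`. A
# minimal, self-contained version of that idea follows; the regex is an
# assumption chosen for illustration, not the exact matching rule used above.
import re

def _attribute_in_source(attribute: str, source: str) -> bool:
    # Match e.g. `config.hidden_size` or `self.config.hidden_size`.
    return re.search(rf"config\.{re.escape(attribute)}\b", source) is not None

assert _attribute_in_source("hidden_size", "x = config.hidden_size * 4")
assert not _attribute_in_source("hidden_size", "y = config.num_layers")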
| 36 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ):
'''simple docstring'''
_UpperCAmelCase = int(round(sample_rate * max_length ) )
if len(_SCREAMING_SNAKE_CASE ) <= sample_length:
return wav
_UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
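# Worked illustration of the random crop above (hypothetical numbers): a 5 s
# waveform at 16 kHz cut to a random 2 s window.
#   wav = np.zeros(5 * 16_000)                  # stand-in for real audio
#   sample_length = int(round(16_000 * 2.0))    # 2 s -> 32_000 samples
#   offset = randint(0, len(wav) - sample_length - 1)
#   wav[offset : offset + sample_length].shape  # -> (32_000,)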
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""})
UpperCamelCase__ = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
UpperCamelCase__ = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
UpperCamelCase__ = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
UpperCamelCase__ = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase__ = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""})
UpperCamelCase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowercase__ ( self : Optional[Any] )->int:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder` '''
'''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`. '''
'''Only make use of `--freeze_feature_encoder`.''' )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
_UpperCAmelCase = DatasetDict()
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct label column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ):
_UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
_UpperCAmelCase = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate )
_UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )}
_UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ):
_UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
_UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate )
_UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )}
_UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names
_UpperCAmelCase , _UpperCAmelCase = {}, {}
for i, label in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = str(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ):
_UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids )
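# Tiny worked example of the metric above (hypothetical logits): predictions
# [[0.1, 0.9], [0.8, 0.2]] with label_ids [1, 0] argmax to [1, 0], so the
# accuracy metric returns {"accuracy": 1.0}.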
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCAmelCase = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCAmelCase = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE )
# Initialize our trainer
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
_UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
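# Example invocation (illustrative; the script file name is inferred from the
# telemetry string above, the dataset and model names are placeholders, and
# every flag maps either to a dataclass field defined above or to a standard
# `TrainingArguments` option):
#
#   python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir ./wav2vec2-keyword-spotting \
#     --do_train --do_eval \
#     --max_length_seconds 1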
| 260 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
A : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A : int = 2_56
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''melgan''']
def __init__( self : Any , __magic_name__ : SpectrogramNotesEncoder , __magic_name__ : SpectrogramContEncoder , __magic_name__ : TaFilmDecoder , __magic_name__ : DDPMScheduler , __magic_name__ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE_ = math.log(1e-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE_ = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE_ = 128
self.register_modules(
notes_encoder=__magic_name__ , continuous_encoder=__magic_name__ , decoder=__magic_name__ , scheduler=__magic_name__ , melgan=__magic_name__ , )
def __A ( self : List[Any] , __magic_name__ : str , __magic_name__ : List[Any]=(-1.0, 1.0) , __magic_name__ : Dict=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = output_range
if clip:
SCREAMING_SNAKE_CASE_ = torch.clip(__magic_name__ , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE_ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Dict=(-1.0, 1.0) , __magic_name__ : int=False ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = input_range
SCREAMING_SNAKE_CASE_ = torch.clip(__magic_name__ , __magic_name__ , __magic_name__ ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE_ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
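# Worked round-trip check of the two helpers above (hypothetical numbers): with
# min_value = log(1e-5) ≈ -11.51 and max_value = 4.0, a feature equal to
# max_value scales to 1.0 in the output range (-1.0, 1.0):
#   zero_one = (4.0 - min_value) / (4.0 - min_value) = 1.0
#   scaled   = 1.0 * (1.0 - (-1.0)) + (-1.0)          = 1.0
# and scale_to_features maps 1.0 back: ((1.0 + 1.0) / 2.0) * (4.0 - min_value)
# + min_value = 4.0, i.e. the two transforms invert each other for in-range inputs.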
def __A ( self : Any , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = input_tokens > 0
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.notes_encoder(
encoder_input_tokens=__magic_name__ , encoder_inputs_mask=__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.continuous_encoder(
encoder_inputs=__magic_name__ , encoder_inputs_mask=__magic_name__ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Any ) -> str:
SCREAMING_SNAKE_CASE_ = noise_time
if not torch.is_tensor(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__magic_name__ ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE_ = self.decoder(
encodings_and_masks=__magic_name__ , decoder_input_tokens=__magic_name__ , decoder_noise_time=__magic_name__ )
return logits
@torch.no_grad()
def __call__( self : Union[str, Any] , __magic_name__ : List[List[int]] , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : int = 100 , __magic_name__ : bool = True , __magic_name__ : str = "numpy" , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__magic_name__ )}.''' )
SCREAMING_SNAKE_CASE_ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = np.zeros([1, 0, self.n_dims] , np.floataa )
SCREAMING_SNAKE_CASE_ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__magic_name__ , device=self.device )
for i, encoder_input_tokens in enumerate(__magic_name__ ):
if i == 0:
SCREAMING_SNAKE_CASE_ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE_ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__magic_name__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE_ = ones
SCREAMING_SNAKE_CASE_ = self.scale_features(
__magic_name__ , output_range=[-1.0, 1.0] , clip=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__magic_name__ , continuous_mask=__magic_name__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE_ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__magic_name__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__magic_name__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE_ = self.decode(
encodings_and_masks=__magic_name__ , input_tokens=__magic_name__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ).prev_sample
SCREAMING_SNAKE_CASE_ = self.scale_to_features(__magic_name__ , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE_ = mel[:1]
SCREAMING_SNAKE_CASE_ = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE_ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ )
logger.info("Generated segment" , __magic_name__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
SCREAMING_SNAKE_CASE_ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
SCREAMING_SNAKE_CASE_ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__magic_name__ )
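# Usage sketch (assumptions flagged): per the __call__ signature above, the
# pipeline takes a list of note-token chunks (List[List[int]]) and, with
# output_type "numpy", vocodes the predicted mel spectrogram through MelGAN.
#   # pipe = ...  # a loaded instance of this pipeline (hypothetical)
#   # output = pipe(tokens, num_inference_steps=100, output_type="numpy")
#   # waveform = output.audios[0]
# The positional-argument and `num_inference_steps` names are assumptions read
# off the annotations and defaults above; `output.audios` follows from the
# AudioPipelineOutput(audios=...) return.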
| 305 |
from math import pi, sqrt, tan
def a__ ( __UpperCamelCase ):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
SCREAMING_SNAKE_CASE_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
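# Worked example of the frustum formula above: for radii 1 and 5 with height 3,
# slant_height = (3**2 + (1 - 5)**2) ** 0.5 = 5.0, so the surface area is
# pi * (5 * (1 + 5) + 1**2 + 5**2) = 56 * pi ≈ 175.93.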
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(__UpperCamelCase , 2 ) * torus_radius * tube_radius
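# Worked example of the torus formula above (4 * pi**2 * R * r): with
# torus_radius = 20 and tube_radius = 10, the area is 4 * pi**2 * 200 ≈ 7895.68.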
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def a__ ( __UpperCamelCase ):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
SCREAMING_SNAKE_CASE_ = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE_ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
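# Worked example of Heron's formula above: for the right triangle (5, 12, 13),
# semi_perimeter = (5 + 12 + 13) / 2 = 15 and
# area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.0, matching (5 * 12) / 2.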
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
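# Worked check of the regular-polygon formula above: for a square (4 sides of
# length 10), tan(pi / 4) = 1, so area = (4 * 10**2) / (4 * 1) = 100, the same
# value area_square(10) returns.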
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 305 | 1 |