| code (stringlengths 82–53.2k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1) |
|---|---|---|---|---|
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 63 |
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
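A quick usage sketch for the two helpers above (function names as reconstructed; the originals are obfuscated in this dump):

# 1/R_parallel = 1/3 + 1/6 = 1/2, so the parallel equivalent is 2.0 ohms;
# the series equivalent is the plain sum, 9.0 ohms.
print(resistor_parallel([3.0, 6.0]))  # 2.0
print(resistor_series([3.0, 6.0]))    # 9.0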
| 201 | 0 |
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_no_pytorch_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_base = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_base).max() < 1e-2
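For readers unfamiliar with the shard/replicate pattern used throughout these tests, a minimal standalone sketch (hypothetical array shapes; not part of the original test file):

import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

# `shard` splits the batch across a leading device axis for pmap;
# `replicate` copies the parameter pytree once per local device.
num_devices = jax.device_count()
batch = np.zeros((num_devices * 2, 4), dtype=np.float32)  # hypothetical inputs
sharded = shard(batch)                                    # (num_devices, 2, 4)
params = {"w": np.ones((4,), dtype=np.float32)}
replicated = replicate(params)                            # leading axis of size num_devices
print(sharded.shape, replicated["w"].shape)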
| 477 |
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 477 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
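As a small illustration of create_token_type_ids_from_sequences above, a pure-Python mimic using hypothetical special-token ids (101 and 102 stand in for [CLS] and [SEP]):

# Sequence-pair layout: [CLS] A A [SEP] gets type 0, B [SEP] gets type 1.
cls, sep = [101], [102]
token_ids_0, token_ids_1 = [7, 8], [9]
type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
print(type_ids)  # [0, 0, 0, 0, 1, 1]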
| 130 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Recursively look for key from both ends of list_data, moving inward;
    returns the matching index, or -1 if the key is absent.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
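A quick usage sketch of the two-ended recursive search above:

# The search narrows from both ends, so key=4 in [1, 2, 3, 4, 5]
# is found from the right end on the second recursive call.
print(search([1, 2, 3, 4, 5], 4))   # 3
print(search([1, 2, 3, 4, 5], 99))  # -1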
| 130 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss

from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
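For intuition, a minimal standalone sketch of the perplexity formula used above, with hypothetical per-token losses in place of real model logits:

import torch

# Perplexity is the exponentiated average negative log-likelihood over
# non-masked tokens: PPL = exp(sum(nll * mask) / sum(mask)).
nll = torch.tensor([[2.0, 1.0, 3.0]])   # hypothetical per-token NLL for one sequence
mask = torch.tensor([[1.0, 1.0, 1.0]])  # attention mask over the same tokens
ppl = torch.exp((nll * mask).sum(1) / mask.sum(1))
print(ppl)  # exp(2.0) ~= 7.389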
| 700 |
def heaps(arr: list) -> list:
    """
    Return all permutations of arr, generated with the recursive form of
    Heap's algorithm.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
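A quick sanity check for the heaps helper above (the output order is specific to this recursive variant):

# Heap's algorithm yields all n! orderings; for n=3 that is 6 tuples.
print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]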
| 343 | 0 |
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special,
        # it requires a BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 369 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter, empirically chosen in [0.04, 0.06]
        window_size: size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
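For reference, the corner response computed inside the loop is the standard Harris measure R = det(M) - k * trace(M)^2 for the windowed structure tensor M; a minimal numeric sketch with hypothetical window sums:

# M = [[wxx, wxy], [wxy, wyy]] for one window; values below are made up.
wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
det = wxx * wyy - wxy**2   # 11.0
trace = wxx + wyy          # 7.0
r = det - k * trace**2     # 11.0 - 0.04 * 49.0 = 9.04, a corner-like response
print(r)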
| 651 | 0 |
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 610 |
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's tokenizers library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 610 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> str:
_a : int = parent
_a : str = batch_size
_a : Any = seq_length
_a : Tuple = is_training
_a : int = use_input_mask
_a : Any = use_token_type_ids
_a : List[Any] = use_labels
_a : Optional[int] = vocab_size
_a : str = hidden_size
_a : Any = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Tuple = intermediate_size
_a : Optional[int] = hidden_act
_a : str = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : List[Any] = max_position_embeddings
_a : Union[str, Any] = type_vocab_size
_a : Dict = type_sequence_label_size
_a : List[Any] = initializer_range
_a : Optional[Any] = num_labels
_a : Dict = num_choices
_a : Dict = scope
def __UpperCamelCase ( self ) -> Dict:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : str = None
if self.use_input_mask:
_a : str = random_attention_mask([self.batch_size, self.seq_length] )
_a : int = None
if self.use_token_type_ids:
_a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Union[str, Any] = None
_a : Any = None
_a : Optional[Any] = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_a : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ) -> List[Any]:
return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def __UpperCamelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Any:
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Dict:
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> str:
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
def __UpperCamelCase ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase : int = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : List[Any] = False
def __UpperCamelCase ( self ) -> str:
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=3_7 )
def __UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __UpperCamelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self ) -> List[Any]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def __UpperCamelCase ( self ) -> List[str]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def __UpperCamelCase ( self ) -> Tuple:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def __UpperCamelCase ( self ) -> Optional[int]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def __UpperCamelCase ( self , scaling_type ) -> List[str]:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> int:
_a : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_a : Tuple = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_a : List[Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowerCamelCase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> Optional[int]:
_a : Any = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a : Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_a : List[Any] = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
_a : Optional[int] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a : List[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowerCamelCase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> Any:
_a : Optional[int] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_a : str = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
_a : int = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def __UpperCamelCase ( self ) -> Dict:
_a : Dict = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_a : Dict = model(torch.tensor(lowerCamelCase_ ) )
_a : str = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1e-2 , rtol=1e-2 )
# fmt: off
_a : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowerCamelCase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def __UpperCamelCase ( self ) -> Optional[Any]:
_a : List[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_a : List[Any] = 'Simply put, the theory of relativity states that '
_a : List[Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_a : Any = tokenizer.encode(lowerCamelCase_ , return_tensors='pt' )
_a : Union[str, Any] = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowerCamelCase_ )
# greedy generation outputs
_a : List[Any] = model.generate(lowerCamelCase_ , max_new_tokens=6_4 , top_p=lowerCamelCase_ , temperature=1 , do_sample=lowerCamelCase_ )
_a : Dict = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
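# A minimal usage sketch (added for illustration; it assumes the public `rope_scaling`
# config field with keys "type" and "factor" that the parameterized test above exercises,
# and reuses the tiny tester sizes).
_rope_cfg = LlamaConfig(vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 )
_rope_cfg.rope_scaling = {'type': 'linear', 'factor': 10.0} # interpolate positions 10x beyond max_position_embeddings
_rope_scaled_model = LlamaModel(_rope_cfg )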
| 120
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """sew"""
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Tuple:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F''' = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
def __UpperCamelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
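# A small worked example (stride values copied from the defaults above) of the closing
# property: the product of the conv strides is the feature extractor's total downsampling
# factor, i.e. one output frame per 320 input samples here.
_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul , _conv_stride , 1 ) == 320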
| 120
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __magic_name__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowercase : str = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowercase : Optional[Any] = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase : Optional[Any] = False
_lowercase : List[str] = False
    def __magic_name__ ( self : Tuple , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=3_7 )
def __magic_name__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __magic_name__ ( self : Optional[Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def __magic_name__ ( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def __magic_name__ ( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def __magic_name__ ( self : int ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def __magic_name__ ( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def __magic_name__ ( self : List[Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 5_0_0_0_0
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = 1e-4
def __magic_name__ ( self : int ):
'''simple docstring'''
        input_ids = tf.constant([[4, 1_0]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = 1e-4
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
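# A hedged numpy sketch (added for illustration, not from the source) of the sinusoidal
# table the embedding test above checks: sines of pos / 10000**(2i/dim) fill the first
# half of each row and cosines the second half, reproducing the expected rows
# [0, 0, 0, 1, 1, 1] and [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000].
import numpy as np
def _sinusoidal_table(num_positions, dim):
    pos = np.arange(num_positions)[:, None]
    i = np.arange(dim // 2)[None, :]
    angles = pos / np.power(10000.0, 2 * i / dim)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)
assert np.allclose(_sinusoidal_table(2, 6)[1, :3], [0.8415, 0.0464, 0.0022], atol=1e-4)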
| 503
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    """simple docstring"""
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract( image , lang , tesseract_config = None ):
    """simple docstring"""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words , left , top , width , height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class A__ ( BaseImageProcessor ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , apply_ocr : bool = True , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = "" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def __magic_name__ ( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = (size["height"], size["width"])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def __magic_name__ ( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , apply_ocr : bool = None , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , "pytesseract" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={"pixel_values": images} , tensor_type=return_tensors )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
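# A quick worked example (hypothetical numbers, added for illustration) of the 0-1000 box
# normalization performed by normalize_box above: each pixel coordinate is rescaled by the
# page dimension on its axis.
assert normalize_box([50, 100, 150, 200] , width=500 , height=400 ) == [100, 250, 300, 500]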
| 503
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint ( ABC ):
    def __init__( self : Tuple ) -> int:
        # test for the above condition
        self.test()
    def test( self : int ) -> Optional[Any]:
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 1_00_00:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )
    @abstractmethod
    def advance( self ):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def does_advance( self , token_id : int ):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def update( self , token_id : int ):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def reset( self ):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def remaining( self ):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
    @abstractmethod
    def copy( self , stateful=False ):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint ( Constraint ):
    def __init__( self , token_ids : List[int] ):
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1 # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining( self ):
        return self.seqlen - (self.fulfilled_idx + 1)
    def copy( self , stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie :
    def __init__( self , nested_token_ids : List[List[int]] , no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F''' {nested_token_ids}.''' )
        self.trie = root
    def next_tokens( self , current_seq : List[int] ):
        # traverse the trie along `current_seq` and return the children reachable next
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self , current_seq : List[int] ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self , trie , nested_token_ids ):
        # a sequence is a subset of another iff the leaf count differs from the sequence count
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint ( Constraint ):
    def __init__( self , nested_token_ids : List[List[int]] ):
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def does_advance( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def update( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.current_seq = []
    def remaining( self ):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def copy( self , stateful=False ):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState :
    def __init__( self , constraints : List[Constraint] ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance( self ):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self , token_ids : Optional[List[int]] ):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete , stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete , stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped , complete , reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped , complete , reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ):
        new_state = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
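# A minimal usage sketch (added for illustration; the ids are arbitrary) of the phrasal
# constraint above: it is advanced token by token, and update() reports
# (stepped, completed, reset) after each step.
_constraint = PhrasalConstraint([4, 8, 15] ) # force the exact phrase 4 8 15
assert _constraint.advance() == 4 # the next token required to make progress
for _token in (4, 8, 15):
    _stepped , _completed , _reset = _constraint.update(_token )
assert _completed and _constraint.remaining() == 0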
| 231
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : str = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig ( PretrainedConfig ):
    model_type = "timesformer"
    def __init__( self , image_size=2_24 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> Tuple:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
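# A short usage sketch of the config above (added for illustration; the keyword values are
# arbitrary): it follows the standard PretrainedConfig pattern.
_tsf_config = TimesformerConfig(num_frames=16 , attention_type="divided_space_time" )
assert _tsf_config.model_type == "timesformer"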
| 231
| 1
|
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class a_ ( Trainer ):
    def __init__( self , args=None , **kwargs ) -> List[str]:
        """simple docstring"""
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 720
|
'''simple docstring'''
from __future__ import annotations
def lowercase__ ( voltage , current , resistance )-> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a (PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = AltDiffusionPipeline
UpperCAmelCase__: Tuple = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__: Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __A ( self ):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
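
For context, a minimal sketch of running this pipeline outside the test harness; the checkpoint name comes from the slow tests above, while the device handling and output path are illustrative assumptions:

import torch
from diffusers import AltDiffusionPipeline

# Load the public AltDiffusion checkpoint exercised by the slow tests above.
pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]
image.save("squirrel.png")  # illustrative output path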
| 456
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
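
For orientation, a minimal sketch of using the tokenizer under test; the checkpoint name is an assumption (the public RoCBert checkpoint), not taken from this file:

from transformers import RoCBertTokenizer

tok = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")  # assumed checkpoint
enc = tok("你好,你是谁")
# RoCBert produces shape and pronunciation ids alongside the usual input_ids
print(sorted(enc.keys()))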
| 456
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
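
A short sketch of the `global_attention_mask` padding behavior implemented in `_pad` above; the checkpoint name matches the pretrained map earlier in this file, and the max length is an illustrative assumption:

from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok("a long document goes here", truncation=True)
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attention on <s>
batch = tok.pad(enc, padding="max_length", max_length=32)
# Padded positions get -1, so they are not confused with the 0 that means "local attention".
print(batch["global_attention_mask"][-4:])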
| 17
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
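
For context, a minimal inference sketch for the model family these tests cover; "facebook/regnet-y-040" is assumed to be the first entry of the archive list used above, and the image path is illustrative:

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")  # assumed checkpoint
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = Image.open("cat.png")  # illustrative path
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id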
| 17
| 1
|
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert an amount of energy between the units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
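
A quick usage sketch of the converter above (values illustrative):

print(energy_conversion("kilowatthour", "joule", 1.0))     # 3600000.0
print(energy_conversion("joule", "calorie_nutr", 4186.8))  # 1.0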
| 513
|
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
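
A usage sketch of the decomposition above; the matrix is a small example with nonzero leading minors, so Doolittle's method succeeds without pivoting:

import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)  # L @ U reconstructs the input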
| 146
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
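
A hedged usage sketch of the config class above; the argument values are illustrative assumptions, not defaults from this file:

config = XmodConfig(num_hidden_layers=6, languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.adapter_reduction_factor)  # 2 by default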
| 595
|
"""simple docstring"""
__A : int = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def lowercase ( UpperCamelCase : str ):
"""simple docstring"""
A__ : Union[str, Any] ={"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
A__ : Tuple =0
A__ : List[str] =0
while place < len(UpperCamelCase ):
if (place + 1 < len(UpperCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
A__ : Dict =[]
for arabic, roman in ROMAN:
((A__) , (A__)) : Union[str, Any] =divmod(UpperCamelCase , UpperCamelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
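
A usage sketch of the two converters above:

assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"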
| 595
| 1
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
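
For orientation, a minimal sketch of the prefix-text API exercised by the slow tests above; the checkpoint name is the one those tests load:

from transformers import GPTSanJapaneseTokenizer

tok = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
ids = tok.encode("こんばんは、世界。", prefix_text="こんにちは、")
print(tok.decode(ids))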
| 502
|
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int], ) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
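
The driver calls above print every ordering via depth-first backtracking; for a two-element input the output would be:

generate_all_permutations([1, 2])
# [1, 2]
# [2, 1]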
| 502
| 1
|
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip

from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
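# Example invocation (a sketch; the paths are illustrative, and options such as
# --model_type and --n_docs are defined earlier in this script):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set test.source \
#       --gold_data_path gold_data.tsv \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa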
| 711
|
"""Bilateral filtering of a grayscale image; OpenCV is used only for image I/O."""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
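# Example invocation (assumes an 8-bit grayscale test image exists at the given path):
#
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
#
# Larger spatial/intensity variances blur more aggressively; the kernel size is
# forced to be odd so the window has a well-defined centre pixel.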
| 680
| 0
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 392
|
"""Deprecation utilities for the diffusers library."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
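# Sketch of a typical call site (the kwarg name and version string below are
# illustrative only, not taken from this file):
#
#   def __call__(self, *args, **kwargs):
#       scale = deprecate("scale", "0.30.0", "Use `cross_attention_kwargs` instead.", take_from=kwargs)
#
# Because the deprecated value is popped out of `kwargs` and returned, callers
# can keep supporting the old argument while steering users to the new one.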
| 28
| 0
|
import datasets
_CITATION = '\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'

_DESCRIPTION = '\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'

_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


def simple_accuracy(preds, labels) -> float:
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references) -> dict:
        return {"accuracy": simple_accuracy(predictions, references)}
| 718
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply ``function`` to every element of ``iterable`` in parallel, via multiprocessing or a joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Configure the parallel backend used by :func:`parallel_map` inside this context."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
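# Sketch of intended usage (the "spark" backend requires the optional
# `joblibspark` package; `_single_map_nested` stands in for the library's
# internal per-slice worker and is an assumption here):
#
#   with parallel_backend("spark"):
#       mapped = parallel_map(fn, items, num_proc=4, types=(dict,),
#                             disable_tqdm=True, desc=None,
#                             single_map_nested_func=_single_map_nested)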
| 596
| 0
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
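# Worked example: with dims == (2, 3), the flat index 5 unravels to (1, 2),
# the same convention as `np.unravel_index(5, (2, 3))`; so
# _flat_idx_to_idx(5, (2, 3)) returns (1, 2).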
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes grow extremely slowly beyond this point
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
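# --- Usage sketch (illustrative, not part of the original module) ------------
# `chunk_layer` flattens the leading `no_batch_dims` batch dimensions and runs
# `layer` on `chunk_size`-sized slices to bound peak memory. Under those
# assumptions, the chunked result matches the unchunked one:
#
#   linear = torch.nn.Linear(8, 8)
#   x = torch.randn(4, 100, 8)                       # two batch dims: (4, 100)
#   full = linear(x)
#   chunked = chunk_layer(lambda input: linear(input), {"input": x},
#                         chunk_size=16, no_batch_dims=2)
#   assert torch.allclose(full, chunked)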
| 37
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 246
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)

            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)

            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
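# Usage sketch (the checkpoint is the public DiT release on the Hub; move the
# pipeline to GPU and pick a dtype as appropriate for your hardware):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images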
| 708
|
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
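# Note: with the lazy structure above, importing this package stays cheap; a
# heavy submodule such as `modeling_xlnet` is only imported the first time one
# of its attributes is accessed, e.g.:
#
#   from transformers.models.xlnet import XLNetConfig  # loads only configuration_xlnet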
| 99
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``: the index is loaded once on the main worker,
    queries from all workers are gathered there, and the retrieval results are scattered back.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 86
| 0
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones, keeping the same API as the other backbones in the
    library.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _backward_compatibility_gradient_checkpointing(self):
        # timm backbones don't support gradient checkpointing, so this is a no-op
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
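# Usage sketch (requires `timm`; "resnet18" is an illustrative backbone name):
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps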
| 718
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
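# Example (assumes an ANSI-capable terminal): draw a separator, write a line in
# green (SGR code 32), then move the cursor back up over it so it can be redrawn.
#
#   linebreak()
#   writeColor("All checks passed", 32, end="\n")
#   move_cursor(1, "up")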
| 647
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    # refer to the full test suite in the Optimum library:
    # https://github.com/huggingface/optimum/tree/main/tests/bettertransformer
    def test_transform_and_reverse(self):
        """Check that converting a model to BetterTransformer and back does not change its outputs."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """`save_pretrained` must raise while the model is in BetterTransformer mode and work once reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 151
| 0
|
"""simple docstring"""
from functools import lru_cache
def a__ ( __lowercase ) -> set:
_A = 2
_A = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__lowercase )
if n > 1:
factors.add(__lowercase )
return factors
@lru_cache
def a__ ( __lowercase ) -> int:
return len(unique_prime_factors(__lowercase ) )
def a__ ( __lowercase ) -> bool:
return len(set(__lowercase ) ) in (0, 1)
def a__ ( __lowercase ) -> list:
_A = 2
while True:
# Increment each value of a generated range
_A = [base + i for i in range(__lowercase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
_A = [upf_len(__lowercase ) for x in group]
checker.append(__lowercase )
# If all numbers in the list are equal, return the group variable.
if equality(__lowercase ):
return group
# Increment our base variable by 1
base += 1
def a__ ( __lowercase = 4 ) -> int:
_A = run(__lowercase )
return results[0] if len(__lowercase ) else None
if __name__ == "__main__":
print(solution())
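# Sanity check: the warm-up case from the problem statement is the triple
# 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19, the first three
# consecutive integers with three distinct prime factors each, so
# solution(3) == 644; the full problem answer is solution() == 134043.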
| 621
|
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
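# Quick self-check of the extended-Euclid implementation above:
if __name__ == "__main__":
    # 3 * 4 = 12, which is congruent to 1 (mod 11), so the inverse of 3 mod 11 is 4.
    assert find_mod_inverse(3, 11) == 4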
| 621
| 1
|
"""Convert Swin2SR checkpoints from the original repository: https://github.com/mv-lab/swin2sr"""
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
a_ : List[str] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
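# Usage sketch (not part of the original script): the URL below is one of the
# checkpoints handled above; the script filename and output path are illustrative.
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64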
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
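# Minimal usage sketch (not part of the original module; `MvpModel` is the
# companion model class and is assumed to be importable from `transformers`):
#
#   from transformers import MvpConfig, MvpModel
#
#   config = MvpConfig(encoder_layers=6, decoder_layers=6)  # smaller-than-default variant
#   model = MvpModel(config)  # randomly initialised weights with this configuration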
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
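# Usage sketch (illustrative paths; the script filename is an assumption):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./custom_vae.pt --dump_path ./vae-diffusers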
import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object-detection labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
    {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
    {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
    {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
    {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
    {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
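# Usage sketch: other modules can call dep_version_check to validate a single
# pinned dependency on demand, e.g.
#   dep_version_check("tqdm")  # raises if the installed tqdm violates the pin in deps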
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b for a strictly diagonally dominant A via Jacobi iteration."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
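# Worked example (values verified by hand against the update rule above):
#   coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
#   constant = np.array([[2], [-6], [-4]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)
#   -> [0.909375, -1.14375, -0.7484375]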
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
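# Usage sketch (the repo id and file path are illustrative):
#   hf_hub_url("user/dataset", "data/train.csv", revision="main")
#   -> "https://huggingface.co/datasets/user/dataset/resolve/main/data/train.csv"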
def longest_common_subsequence(x: str, y: str):
    """
    Finds the length of the longest common subsequence between two strings
    using dynamic programming, and reconstructs one such subsequence by
    backtracking through the DP table.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0

            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
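# Another worked example (verified by hand): "gaming" occurs inside
# "programming" as a subsequence, so it is itself the LCS of the pair.
#   longest_common_subsequence("programming", "gaming") -> (6, "gaming")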
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
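# Usage sketch (a judgment call, not from this file): callers that support
# hyperparameter search can pick a backend automatically, e.g.
#   backend_name = default_hp_search_backend()  # -> "optuna" when optuna is the
#   first available backend in the mapping above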
def perfect_cube(n: int) -> bool:
    """
    Check whether `n` is a perfect cube. The cube root is rounded before
    re-cubing to avoid float-precision failures such as 27 ** (1 / 3)
    evaluating to 3.0000000000000004.
    """
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
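# Quick checks: 343 == 7 ** 3, while 344 is not a cube.
#   perfect_cube(343)  # -> True
#   perfect_cube(344)  # -> False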
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase ( _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] )-> Any:
"""simple docstring"""
A__ = OmegaConf.load(_A )
A__ = torch.load(_A , map_location="cpu" )["model"]
A__ = list(state_dict.keys() )
# extract state_dict for VQVAE
A__ = {}
A__ = "first_stage_model."
for key in keys:
if key.startswith(_A ):
A__ = state_dict[key]
# extract state_dict for UNetLDM
A__ = {}
A__ = "model.diffusion_model."
for key in keys:
if key.startswith(_A ):
A__ = state_dict[key]
A__ = config.model.params.first_stage_config.params
A__ = config.model.params.unet_config.params
A__ = VQModel(**_A ).eval()
vqvae.load_state_dict(_A )
A__ = UNetLDMModel(**_A ).eval()
unet.load_state_dict(_A )
A__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_A , )
A__ = LDMPipeline(_A , _A , _A )
pipeline.save_pretrained(_A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
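# Example invocation (hypothetical file names -- any original-LDM checkpoint/config
# pair should work):
#
#     python conversion_ldm_uncond.py \
#         --checkpoint_path ldm-celebahq-256.ckpt \
#         --config_path ldm-celebahq-256.yaml \
#         --output_path ./ldm-pipeline
#
# The saved directory can then be reloaded with LDMPipeline.from_pretrained("./ldm-pipeline").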
| 491
| 1
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num``, or None if no factor was found."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 715
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
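# Minimal sketch of the registration API exercised above, outside the test harness
# (NewModelConfig / TFNewModel are the toy classes defined at the top of this file;
# the register calls are the public auto-class API):
#
#     AutoConfig.register("new-model", NewModelConfig)
#     TFAutoModel.register(NewModelConfig, TFNewModel)
#     config = NewModelConfig()
#     model = TFAutoModel.from_config(config)   # resolves to a TFNewModel instance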
| 120
| 0
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a sandboxed subprocess and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files). This is NOT a security sandbox; untrusted code should
    still be executed inside a dedicated container.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
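# Illustrative driver (assumes a POSIX system, since the sandbox relies on
# signal.setitimer; reliability_guard intentionally cripples the worker process,
# which is why execution happens in a subprocess):
#
#     program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#     outcome = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
#     # outcome["passed"] is True; a failing assertion surfaces as result "failed: ..."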
| 64
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
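# Usage sketch (the checkpoint name is the public DPR reader; in practice the
# logits come from a DPRReader forward pass rather than being built by hand):
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What does DPR stand for?",
#         titles="Dense Passage Retrieval",
#         texts="DPR stands for Dense Passage Retrieval, a dual-encoder QA pipeline.",
#         return_tensors="pt",
#     )
#     # encoded["input_ids"] has shape (n_passages, sequence_length); passing the
#     # model outputs to tokenizer.decode_best_spans(encoded, outputs) yields
#     # ranked DPRSpanPrediction tuples with text, span indices, and scores.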
| 35
| 0
|
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a sandboxed subprocess and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files). This is NOT a security sandbox; untrusted code should
    still be executed inside a dedicated container.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 705
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create a vector pointing from ``end_point1`` to ``end_point2``."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether a vector equals (0, 0, 0) up to ``accuracy`` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear exactly when AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
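# Worked example: (0, 0, 0), (1, 1, 1), (2, 2, 2) lie on one line, so the AB x AC
# cross product vanishes; moving the third point off the line gives (1, -2, 1) != 0:
#
#     assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
#     assert not are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))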
| 410
| 0
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
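# Smoke test for BasicTransformerBlock above (dimensions are arbitrary; this
# assumes the default "layer_norm" configuration with self-attention only):
#
#     import torch
#     block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#     hidden_states = torch.randn(2, 10, 64)  # (batch, sequence, dim)
#     assert block(hidden_states).shape == hidden_states.shape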
| 17
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
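# For reference: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property
# above reduces to 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples:
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 320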
| 17
| 1
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 706
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 414
| 0
|
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """
    Solve the 0/1 knapsack problem by plain recursion over include/exclude choices.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
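# Worked example (illustrative values): with capacity 6 and items
# (weight, value) = (3, 5), (2, 4), (4, 8), taking the last two items is optimal:
#
#     weights, values = [3, 2, 4], [5, 4, 8]
#     assert knapsack(weights, values, len(weights), 6, 0) == 12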
| 171
|
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """
    Solve the 0/1 knapsack problem by plain recursion over include/exclude choices.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class a_ ( snake_case ):
UpperCAmelCase : Dict = """time_series_transformer"""
UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Any , a_ : Optional[int] = None , a_ : Optional[int] = None , a_ : str = "student_t" , a_ : str = "nll" , a_ : int = 1 , a_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , a_ : Optional[Union[str, bool]] = "mean" , a_ : int = 0 , a_ : int = 0 , a_ : int = 0 , a_ : int = 0 , a_ : Optional[List[int]] = None , a_ : Optional[List[int]] = None , a_ : int = 3_2 , a_ : int = 3_2 , a_ : int = 2 , a_ : int = 2 , a_ : int = 2 , a_ : int = 2 , a_ : bool = True , a_ : str = "gelu" , a_ : int = 6_4 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : int = 1_0_0 , a_ : float = 0.0_2 , a_ : Optional[int]=True , **a_ : Tuple , ) -> Optional[int]:
# time series specific configuration
snake_case: Dict =prediction_length
snake_case: Any =context_length or prediction_length
snake_case: str =distribution_output
snake_case: List[str] =loss
snake_case: Optional[Any] =input_size
snake_case: Optional[Any] =num_time_features
snake_case: List[str] =lags_sequence
snake_case: Union[str, Any] =scaling
snake_case: List[str] =num_dynamic_real_features
snake_case: Any =num_static_real_features
snake_case: Dict =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
snake_case: Dict =cardinality
else:
snake_case: Dict =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
snake_case: List[Any] =embedding_dimension
else:
snake_case: str =[min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case: Any =num_parallel_samples
# Transformer architecture configuration
snake_case: Union[str, Any] =input_size * len(a_ ) + self._number_of_features
snake_case: List[Any] =d_model
snake_case: int =encoder_attention_heads
snake_case: Optional[int] =decoder_attention_heads
snake_case: str =encoder_ffn_dim
snake_case: List[Any] =decoder_ffn_dim
snake_case: str =encoder_layers
snake_case: List[str] =decoder_layers
snake_case: List[Any] =dropout
snake_case: Union[str, Any] =attention_dropout
snake_case: Optional[int] =activation_dropout
snake_case: str =encoder_layerdrop
snake_case: Optional[int] =decoder_layerdrop
snake_case: Tuple =activation_function
snake_case: List[Any] =init_std
snake_case: Union[str, Any] =use_cache
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def UpperCamelCase ( self : Tuple ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
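# A worked instance of the feature-count arithmetic in the property above
# (the concrete numbers are illustrative, not library defaults):
_embedding_dimension = [2]          # one static categorical feature embedded in 2 dims
_num_dynamic_real_features = 0
_num_time_features = 2              # e.g. age and time-of-day covariates
_num_static_real_features = 0
_input_size = 1                     # univariate target series
_number_of_features = (
    sum(_embedding_dimension)
    + _num_dynamic_real_features
    + _num_time_features
    + _num_static_real_features
    + _input_size * 2               # the log1p(abs(loc)) and log(scale) features
)
assert _number_of_features == 6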
| 347
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
a = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
a = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : List[str] = CamembertTokenizer
UpperCAmelCase : Dict = CamembertTokenizerFast
UpperCAmelCase : List[str] = True
UpperCAmelCase : str = True
def UpperCamelCase ( self : str ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case: Dict =CamembertTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Tuple ) -> List[Any]:
snake_case: Any ='<pad>'
snake_case: Dict =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def UpperCamelCase ( self : Optional[Any] ) -> List[str]:
snake_case: List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(a_ ) , 1_0_0_4 )
def UpperCamelCase ( self : Dict ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def UpperCamelCase ( self : List[Any] ) -> Dict:
snake_case: Tuple =CamembertTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
snake_case: List[Any] =CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case: str ='I was born in 92000, and this is falsé.'
snake_case: Optional[int] =tokenizer.encode(a_ )
snake_case: int =rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
snake_case: Any =tokenizer.encode(a_ , add_special_tokens=a_ )
snake_case: Union[str, Any] =rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case: Any =tokenizer.convert_ids_to_tokens(a_ )
snake_case: int =rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
def UpperCamelCase ( self : Dict ) -> int:
if not self.test_rust_tokenizer:
return
snake_case: Tuple =self.get_tokenizer()
snake_case: Union[str, Any] =self.get_rust_tokenizer()
snake_case: Tuple ='I was born in 92000, and this is falsé.'
snake_case: Dict =tokenizer.tokenize(a_ )
snake_case: Optional[int] =rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
snake_case: Optional[Any] =tokenizer.encode(a_ , add_special_tokens=a_ )
snake_case: Optional[Any] =rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
snake_case: Any =self.get_rust_tokenizer()
snake_case: Union[str, Any] =tokenizer.encode(a_ )
snake_case: List[Any] =rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
# fmt: off
snake_case: List[Any] ={'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
snake_case: Any =[
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
| 347
| 1
|
"""simple docstring"""
def partition(m: int) -> int:
    # memo[n][k] counts the partitions of n whose parts are all <= k + 1.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            # Partitions avoiding part k + 1, plus partitions using at least one part k + 1.
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
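# Worked check: 5 has exactly 7 partitions
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).
assert partition(5) == 7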
| 46
|
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 143
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : BigBirdConfig
lowerCAmelCase__ : jnp.dtype = jnp.floataa
lowerCAmelCase__ : bool = True
def _UpperCAmelCase ( self: Any ) -> Optional[Any]:
'''simple docstring'''
super().setup()
__UpperCAmelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self: Union[str, Any] , *__lowerCAmelCase: int , **__lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
__UpperCAmelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : int = FlaxBigBirdForNaturalQuestionsModule
def __lowerCAmelCase ( A_ : int , A_ : List[Any] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Dict ) -> int:
def cross_entropy(A_ : List[str] , A_ : int , A_ : List[str]=None ):
__UpperCAmelCase = logits.shape[-1]
__UpperCAmelCase = (labels[..., None] == jnp.arange(A_ )[None]).astype("f4" )
__UpperCAmelCase = jax.nn.log_softmax(A_ , axis=-1 )
__UpperCAmelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__UpperCAmelCase = reduction(A_ )
return loss
__UpperCAmelCase = partial(A_ , reduction=jnp.mean )
__UpperCAmelCase = cross_entropy(A_ , A_ )
__UpperCAmelCase = cross_entropy(A_ , A_ )
__UpperCAmelCase = cross_entropy(A_ , A_ )
return (start_loss + end_loss + pooled_loss) / 3
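# Self-contained sanity check of the one-hot cross-entropy used above
# (the logits and label are illustrative; jax/jnp are imported at the top).
_logits = jnp.array([[2.0, 0.0, 0.0]])
_labels = jnp.array([0])
_one_hot = (_labels[..., None] == jnp.arange(_logits.shape[-1])[None]).astype("f4")
_loss = -jnp.sum(_one_hot * jax.nn.log_softmax(_logits, axis=-1), axis=-1)
# _loss ≈ [0.2395], i.e. minus the log-softmax of the correct class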
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : str = "google/bigbird-roberta-base"
lowerCAmelCase__ : int = 3000
lowerCAmelCase__ : int = 1_0500
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : int = 5
# tx_args
lowerCAmelCase__ : float = 3E-5
lowerCAmelCase__ : float = 0.0
lowerCAmelCase__ : int = 2_0000
lowerCAmelCase__ : float = 0.0_095
lowerCAmelCase__ : str = "bigbird-roberta-natural-questions"
lowerCAmelCase__ : str = "training-expt"
lowerCAmelCase__ : str = "data/nq-training.jsonl"
lowerCAmelCase__ : str = "data/nq-validation.jsonl"
def _UpperCAmelCase ( self: Tuple ) -> Optional[Any]:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=__lowerCAmelCase )
__UpperCAmelCase = os.path.join(self.base_dir , self.save_dir )
__UpperCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : int
lowerCAmelCase__ : int = 4096 # no dynamic padding on TPUs
def __call__( self: Any , __lowerCAmelCase: Optional[int] ) -> str:
'''simple docstring'''
__UpperCAmelCase = self.collate_fn(__lowerCAmelCase )
__UpperCAmelCase = jax.tree_util.tree_map(__lowerCAmelCase , __lowerCAmelCase )
return batch
def _UpperCAmelCase ( self: str , __lowerCAmelCase: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.fetch_inputs(features["input_ids"] )
__UpperCAmelCase = {
"input_ids": jnp.array(__lowerCAmelCase , dtype=jnp.intaa ),
"attention_mask": jnp.array(__lowerCAmelCase , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: list ) -> str:
'''simple docstring'''
__UpperCAmelCase = [self._fetch_inputs(__lowerCAmelCase ) for ids in input_ids]
return zip(*__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: list ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = [1 for _ in range(len(__lowerCAmelCase ) )]
while len(__lowerCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __lowerCAmelCase ( A_ : List[Any] , A_ : Optional[int] , A_ : List[Any]=None ) -> Optional[Any]:
if seed is not None:
__UpperCAmelCase = dataset.shuffle(seed=A_ )
for i in range(len(A_ ) // batch_size ):
__UpperCAmelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(A_ )
@partial(jax.pmap , axis_name="batch" )
def __lowerCAmelCase ( A_ : int , A_ : List[Any] , **A_ : int ) -> Optional[Any]:
def loss_fn(A_ : List[Any] ):
__UpperCAmelCase = model_inputs.pop("start_labels" )
__UpperCAmelCase = model_inputs.pop("end_labels" )
__UpperCAmelCase = model_inputs.pop("pooled_labels" )
__UpperCAmelCase = state.apply_fn(**A_ , params=A_ , dropout_rng=A_ , train=A_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = outputs
return state.loss_fn(
A_ , A_ , A_ , A_ , A_ , A_ , )
__UpperCAmelCase , __UpperCAmelCase = jax.random.split(A_ )
__UpperCAmelCase = jax.value_and_grad(A_ )
__UpperCAmelCase , __UpperCAmelCase = grad_fn(state.params )
__UpperCAmelCase = jax.lax.pmean({"loss": loss} , axis_name="batch" )
__UpperCAmelCase = jax.lax.pmean(A_ , "batch" )
__UpperCAmelCase = state.apply_gradients(grads=A_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def __lowerCAmelCase ( A_ : Any , **A_ : Tuple ) -> Optional[Any]:
__UpperCAmelCase = model_inputs.pop("start_labels" )
__UpperCAmelCase = model_inputs.pop("end_labels" )
__UpperCAmelCase = model_inputs.pop("pooled_labels" )
__UpperCAmelCase = state.apply_fn(**A_ , params=state.params , train=A_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = outputs
__UpperCAmelCase = state.loss_fn(A_ , A_ , A_ , A_ , A_ , A_ )
__UpperCAmelCase = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class UpperCAmelCase__ ( train_state.TrainState ):
"""simple docstring"""
lowerCAmelCase__ : Callable = struct.field(pytree_node=snake_case )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : Args
lowerCAmelCase__ : Callable
lowerCAmelCase__ : Callable
lowerCAmelCase__ : Callable
lowerCAmelCase__ : Callable
lowerCAmelCase__ : wandb
lowerCAmelCase__ : Callable = None
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: Dict , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Tuple=None ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = model.params
__UpperCAmelCase = TrainState.create(
apply_fn=model.__call__ , params=__lowerCAmelCase , tx=__lowerCAmelCase , loss_fn=__lowerCAmelCase , )
if ckpt_dir is not None:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = restore_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
__UpperCAmelCase , __UpperCAmelCase = build_tx(**__lowerCAmelCase )
__UpperCAmelCase = train_state.TrainState(
step=__lowerCAmelCase , apply_fn=model.__call__ , params=__lowerCAmelCase , tx=__lowerCAmelCase , opt_state=__lowerCAmelCase , )
__UpperCAmelCase = args
__UpperCAmelCase = data_collator
__UpperCAmelCase = lr
__UpperCAmelCase = params
__UpperCAmelCase = jax_utils.replicate(__lowerCAmelCase )
return state
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: str , __lowerCAmelCase: List[Any] , __lowerCAmelCase: int ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.args
__UpperCAmelCase = len(__lowerCAmelCase ) // args.batch_size
__UpperCAmelCase = jax.random.PRNGKey(0 )
__UpperCAmelCase = jax.random.split(__lowerCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
__UpperCAmelCase = jnp.array(0 , dtype=jnp.floataa )
__UpperCAmelCase = get_batched_dataset(__lowerCAmelCase , args.batch_size , seed=__lowerCAmelCase )
__UpperCAmelCase = 0
for batch in tqdm(__lowerCAmelCase , total=__lowerCAmelCase , desc=F'''Running EPOCH-{epoch}''' ):
__UpperCAmelCase = self.data_collator(__lowerCAmelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.train_step_fn(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
__UpperCAmelCase = jax_utils.unreplicate(state.step )
__UpperCAmelCase = running_loss.item() / i
__UpperCAmelCase = self.scheduler_fn(state_step - 1 )
__UpperCAmelCase = self.evaluate(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__lowerCAmelCase ) )
self.logger.log(__lowerCAmelCase , commit=__lowerCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: List[str] , __lowerCAmelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = get_batched_dataset(__lowerCAmelCase , self.args.batch_size )
__UpperCAmelCase = len(__lowerCAmelCase ) // self.args.batch_size
__UpperCAmelCase = jnp.array(0 , dtype=jnp.floataa )
__UpperCAmelCase = 0
for batch in tqdm(__lowerCAmelCase , total=__lowerCAmelCase , desc="Evaluating ... " ):
__UpperCAmelCase = self.data_collator(__lowerCAmelCase )
__UpperCAmelCase = self.val_step_fn(__lowerCAmelCase , **__lowerCAmelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def _UpperCAmelCase ( self: List[str] , __lowerCAmelCase: str , __lowerCAmelCase: Optional[Any] ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = jax_utils.unreplicate(__lowerCAmelCase )
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... " )
self.model_save_fn(__lowerCAmelCase , params=state.params )
with open(os.path.join(__lowerCAmelCase , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__lowerCAmelCase , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(__lowerCAmelCase , "data_collator.joblib" ) )
with open(os.path.join(__lowerCAmelCase , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , __lowerCAmelCase )
print("DONE" )
def __lowerCAmelCase ( A_ : Optional[int] , A_ : Optional[Any] ) -> Union[str, Any]:
print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=" ... " )
with open(os.path.join(A_ , "flax_model.msgpack" ) , "rb" ) as f:
__UpperCAmelCase = from_bytes(state.params , f.read() )
with open(os.path.join(A_ , "opt_state.msgpack" ) , "rb" ) as f:
__UpperCAmelCase = from_bytes(state.opt_state , f.read() )
__UpperCAmelCase = joblib.load(os.path.join(A_ , "args.joblib" ) )
__UpperCAmelCase = joblib.load(os.path.join(A_ , "data_collator.joblib" ) )
with open(os.path.join(A_ , "training_state.json" ) , "r" ) as f:
__UpperCAmelCase = json.load(A_ )
__UpperCAmelCase = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def __lowerCAmelCase ( A_ : Tuple , A_ : List[str] , A_ : str , A_ : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase = num_train_steps - warmup_steps
__UpperCAmelCase = optax.linear_schedule(init_value=A_ , end_value=A_ , transition_steps=A_ )
__UpperCAmelCase = optax.linear_schedule(init_value=A_ , end_value=1e-7 , transition_steps=A_ )
__UpperCAmelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
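# A minimal, self-contained sketch of the warmup + linear-decay schedule
# assembled above (the hyper-parameters below are illustrative only).
_warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
_decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=1_000)
_sched = optax.join_schedules(schedules=[_warmup, _decay], boundaries=[100])
# _sched(0) == 0.0, _sched(100) == 3e-5, then linear decay towards 1e-7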
def __lowerCAmelCase ( A_ : List[str] , A_ : str , A_ : Dict , A_ : Any , A_ : Any ) -> Optional[Any]:
def weight_decay_mask(A_ : str ):
__UpperCAmelCase = traverse_util.flatten_dict(A_ )
__UpperCAmelCase = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(A_ )
__UpperCAmelCase = scheduler_fn(A_ , A_ , A_ , A_ )
__UpperCAmelCase = optax.adamw(learning_rate=A_ , weight_decay=A_ , mask=A_ )
return tx, lr
| 701
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Contract for a CLI subcommand: register an argparse sub-parser, then run."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
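# A minimal sketch of a concrete subcommand honouring the contract above.
# The command name and behaviour are illustrative; `parser` is expected to be
# the argparse sub-parsers action, as in the real CLI wiring.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print a fixed message.")
        echo_parser.set_defaults(func=lambda args: EchoCommand())

    def run(self):
        print("echo")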
| 286
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
"""simple docstring"""
@staticmethod
def A__ ( *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[Any] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def A__ ( self : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ):
A__ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
A__ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def A__ ( self : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] ):
A__ = vqa_pipeline(_lowerCamelCase , top_k=1 )
self.assertEqual(
_lowerCamelCase , [
[{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase )}],
[{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase )}],
] , )
@require_torch
def A__ ( self : Optional[Any] ):
A__ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
A__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
A__ = '''How many cats are there?'''
A__ = vqa_pipeline(image=_lowerCamelCase , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_lowerCamelCase , [{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase )}, {'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase )}] )
A__ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_lowerCamelCase , [{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase )}, {'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase )}] )
@slow
@require_torch
def A__ ( self : Optional[Any] ):
A__ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
A__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
A__ = '''How many cats are there?'''
A__ = vqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
A__ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
A__ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def A__ ( self : Optional[Any] ):
pass
| 571
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
        'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTForImageClassification',
        'ViTForMaskedImageModeling',
        'ViTModel',
        'ViTPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
        'TFViTForImageClassification',
        'TFViTModel',
        'TFViTPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
        'FlaxViTForImageClassification',
        'FlaxViTModel',
        'FlaxViTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 571
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__lowerCAmelCase =False
try:
__lowerCAmelCase =_is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class __magic_name__ :
def __init__( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str = None ,__SCREAMING_SNAKE_CASE : list = [] ):
UpperCAmelCase = 0
UpperCAmelCase = choices
UpperCAmelCase = prompt
if sys.platform == "win32":
UpperCAmelCase = "*"
else:
UpperCAmelCase = "➔ "
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] ,3_2 ,__SCREAMING_SNAKE_CASE )
else:
forceWrite(self.choices[index] ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int ):
if index == self.position:
forceWrite(f''' {self.arrow_char} ''' )
self.write_choice(__SCREAMING_SNAKE_CASE )
else:
forceWrite(f''' {self.choices[index]}''' )
reset_cursor()
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : Direction ,__SCREAMING_SNAKE_CASE : int = 1 ):
UpperCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__SCREAMING_SNAKE_CASE )
move_cursor(__SCREAMING_SNAKE_CASE ,direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _UpperCAmelCase ( self : Optional[int] ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _UpperCAmelCase ( self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _UpperCAmelCase ( self : int ):
move_cursor(len(self.choices ) - self.position ,"DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _UpperCAmelCase ( self : int ):
move_cursor(len(self.choices ) - self.position ,"DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__SCREAMING_SNAKE_CASE )] for number in range(1_0 )] )
def _UpperCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase = int(chr(self.current_selection ) )
UpperCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP ,-movement )
elif self.position < index:
self.move_direction(Direction.DOWN ,__SCREAMING_SNAKE_CASE )
else:
return
else:
return
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt ,"\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" ,"\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" ,"\n" )
UpperCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__SCREAMING_SNAKE_CASE )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position ,"UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase = int(builtins.input() )
except ValueError:
UpperCAmelCase = default_choice
else:
UpperCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 ,"UP" )
clear_line()
self.write_choice(__SCREAMING_SNAKE_CASE ,"\n" )
return choice
| 405
|
def combination_util(arr, n, r, index, data, i):
    """Recursively build and print every r-element combination of arr[0..n-1]."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
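# Expected output: the C(5, 3) = 10 combinations, one per line, i.e.
# "10 20 30", "10 20 40", ..., "30 40 50".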
| 405
| 1
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : List[Any] = LEDConfig
UpperCamelCase_ : Tuple = {}
UpperCamelCase_ : Dict = '''gelu'''
def __init__( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Union[str, Any]=37 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=20 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Tuple=4 , ):
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : str = eos_token_id
SCREAMING_SNAKE_CASE : int = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE : Dict = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : int = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE : Tuple = prepare_led_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = tf.concat(
[tf.zeros_like(UpperCAmelCase_ )[:, :-1], tf.ones_like(UpperCAmelCase_ )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE : int = global_attention_mask
return config, inputs_dict
def _A ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : str = TFLEDModel(config=UpperCAmelCase_ ).get_decoder()
SCREAMING_SNAKE_CASE : str = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : List[str] = input_ids[:1, :]
SCREAMING_SNAKE_CASE : Any = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# first forward pass
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3 )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : Tuple = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
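# Illustration of the default mask construction in the helper above,
# assuming pad_token_id == 1 (the ids are made up):
#     input_ids      = [[0, 5, 7, 1, 1]]
#     attention_mask = [[1, 1, 1, 0, 0]]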
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase_ : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase_ : int = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : int = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Union[str, Any] = False
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=UpperCAmelCase_ )
def _A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _A ( self : int ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = tf.zeros_like(inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE : Tuple = 2
SCREAMING_SNAKE_CASE : List[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Dict = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Dict = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_decoder_attentions_output(UpperCAmelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _A ( self : List[Any] ):
pass
def _A ( self : Optional[Any] ):
        # TODO: head masking is not yet implemented
pass
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.intaa )
snake_case = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
SCREAMING_SNAKE_CASE : List[str] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : Dict = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : List[Any] = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : int = (1, 1024, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
SCREAMING_SNAKE_CASE : Any = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : Optional[int] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : List[Any] = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Any = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 , rtol=1E-3 )
| 62
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
        'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ElectraForCausalLM',
        'ElectraForMaskedLM',
        'ElectraForMultipleChoice',
        'ElectraForPreTraining',
        'ElectraForQuestionAnswering',
        'ElectraForSequenceClassification',
        'ElectraForTokenClassification',
        'ElectraModel',
        'ElectraPreTrainedModel',
        'load_tf_weights_in_electra',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
        'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFElectraForMaskedLM',
        'TFElectraForMultipleChoice',
        'TFElectraForPreTraining',
        'TFElectraForQuestionAnswering',
        'TFElectraForSequenceClassification',
        'TFElectraForTokenClassification',
        'TFElectraModel',
        'TFElectraPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
        'FlaxElectraForCausalLM',
        'FlaxElectraForMaskedLM',
        'FlaxElectraForMultipleChoice',
        'FlaxElectraForPreTraining',
        'FlaxElectraForQuestionAnswering',
        'FlaxElectraForSequenceClassification',
        'FlaxElectraForTokenClassification',
        'FlaxElectraModel',
        'FlaxElectraPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 226
| 0
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
A__ : Any = MODEL_FOR_MASKED_LM_MAPPING
A__ : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING
def snake_case__ ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def snake_case__ ( self ) -> Optional[int]:
A__ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
A__ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
A__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
A__ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
A__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
A__ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
A__ = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
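        # Illustrative note (hypothetical tokenizer): targets such as
        # ["a", "b", "a", "c", "b"] de-duplicate to three distinct candidates,
        # so even top_k=10 can return at most 3 results here.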
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 718
|
"""simple docstring"""
from timeit import timeit
UpperCamelCase = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
A__ = 0
A__ = len(UpperCAmelCase_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
A__ = len(UpperCAmelCase_ ) // 2
A__ = len(UpperCAmelCase_ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(UpperCAmelCase_ ) )
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
if len(UpperCAmelCase_ ) <= 2:
return True
if s[0] == s[len(UpperCAmelCase_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
return s == s[::-1]
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> None:
"""simple docstring"""
A__ = F"""all({name}(key) is value for key, value in test_data.items())"""
A__ = F"""from __main__ import test_data, {name}"""
A__ = 500000
A__ = timeit(stmt=UpperCAmelCase_, setup=UpperCAmelCase_, number=UpperCAmelCase_ )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 562
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 339
|
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area bounded by fnc and the x-axis using `steps` trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
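    # Added illustration (easily checked by hand): the area under x**2 on [0, 1]
    # is 1/3, so the estimate below should print a value close to 0.33333.
    print("The area under x^2 between x = 0 and x = 1 is:")
    print(f"with 1000 steps: {trapezoidal_area(lambda x: x**2, 0, 1, 1000)}")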
| 339
| 1
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute per-question macro-F1 (f1_m), answer-level F1 (f1_a) and exact match."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 312
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass(self):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 312
| 1
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Unwrap a model from distributed/compiled containers down to the bare module."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save the data to disk, using the TPU-aware saver or only the main local process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables, removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a readable name for an object, class, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, descending into nested dicts."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port=None):
    """Return True if a process is already listening on the given localhost port."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
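# merge_dicts usage sketch (illustrative): nested keys merge instead of clobbering.
#     >>> merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#     {'a': {'c': 2, 'b': 1}, 'd': 3}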
| 155
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 155
| 1
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Build a beta schedule that discretizes the squared-cosine alpha_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
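# Quick sanity sketch for betas_for_alpha_bar (illustrative, not part of the API):
#     >>> b = betas_for_alpha_bar(10)
#     >>> b.shape
#     (10,)
#     >>> bool(b[0] < b[-1])  # cosine-schedule betas grow toward max_beta
#     True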
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create(cls, scheduler):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
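# Illustrative note: the helpers above implement the standard DDPM relations
#     noisy    = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
#     velocity = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
# with the cumulative products broadcast from the left to the sample shape.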
| 711
|
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 11
| 0
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the loaded dataset bunch into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California housing dataset is used to demonstrate the regressor.
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 592
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree with a two-pointer scan."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
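# Worked micro-example (illustrative): with primes [2, 3, 5] and an upper bound
# of 7, only the pair (2, 3) satisfies q*log2(p) + p*log2(q) <= 7 (~6.17), so the
# two-pointer scan counts exactly one hybrid integer: 2**3 * 3**2 = 72.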
if __name__ == "__main__":
print(F"{solution() = }")
| 547
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69
|
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
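# Illustrative check: the standard normal density peaks at 1/sqrt(2*pi).
#     >>> round(gaussian(0), 5)
#     0.39894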
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift left by appending zeroes to the binary representation."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift right by dropping the lowest bits (unsigned semantics)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement semantics)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
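# Illustrative expected values (derived by hand from the definitions above):
#     >>> logical_left_shift(17, 2)
#     '0b1000100'
#     >>> logical_right_shift(17, 2)
#     '0b100'
#     >>> arithmetic_right_shift(-17, 2)
#     '0b111011'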
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261
| 0
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given two known values (and 0 for the unknown), solve for it."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 604
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
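# Each logits processor/warper below maps (input_ids, scores, cur_len) to adjusted next-token scores.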
@require_flax
class __snake_case ( unittest.TestCase ):
    def _get_uniform_logits(self , batch_size , length):
        """simple docstring"""
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = 20
lowerCAmelCase__ = self._get_uniform_logits(batch_size=2 ,length=a_ )
# tweak scores to not be uniform anymore
lowerCAmelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCAmelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCAmelCase__ = jax.nn.softmax(a_ ,axis=-1 )
lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCAmelCase__ = jax.nn.softmax(temp_dist_warper_sharper(a_ ,scores.copy() ,cur_len=a_ ) ,axis=-1 )
lowerCAmelCase__ = jax.nn.softmax(temp_dist_warper_smoother(a_ ,scores.copy() ,cur_len=a_ ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = 10
lowerCAmelCase__ = 2
# create ramp distribution
lowerCAmelCase__ = np.broadcast_to(np.arange(a_ )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCAmelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase__ = FlaxTopKLogitsWarper(3 )
lowerCAmelCase__ = top_k_warp(a_ ,a_ ,cur_len=a_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase__ = 5
lowerCAmelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCAmelCase__ = np.broadcast_to(np.arange(a_ )[None, :] ,(batch_size, length) ).copy()
lowerCAmelCase__ = top_k_warp_safety_check(a_ ,a_ ,cur_len=a_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = 10
lowerCAmelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase__ = np.exp(top_p_warp(a_ ,a_ ,cur_len=a_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(a_ ,a_ ,atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase__ = np.broadcast_to(np.arange(a_ )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCAmelCase__ = top_p_warp(a_ ,a_ ,cur_len=a_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 20
lowerCAmelCase__ = 4
lowerCAmelCase__ = 0
lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=a_ )
# check that min length is applied at length 5
lowerCAmelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCAmelCase__ = 5
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = min_dist_processor(a_ ,a_ ,cur_len=a_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = 15
lowerCAmelCase__ = min_dist_processor(a_ ,a_ ,cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 20
lowerCAmelCase__ = 4
lowerCAmelCase__ = 0
lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCAmelCase__ = 1
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = logits_processor(a_ ,a_ ,cur_len=a_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase__ = 3
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = logits_processor(a_ ,a_ ,cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 20
lowerCAmelCase__ = 4
lowerCAmelCase__ = 0
lowerCAmelCase__ = 5
lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ ,eos_token_id=a_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCAmelCase__ = 4
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = logits_processor(a_ ,a_ ,cur_len=a_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase__ = 3
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = logits_processor(a_ ,a_ ,cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 4
lowerCAmelCase__ = 10
lowerCAmelCase__ = 15
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
lowerCAmelCase__ = 15
# dummy input_ids and scores
lowerCAmelCase__ = ids_tensor((batch_size, sequence_length) ,a_ )
lowerCAmelCase__ = input_ids.copy()
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = scores.copy()
# instantiate all dist processors
lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase__ = FlaxTopKLogitsWarper(3 )
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=a_ )
lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ ,eos_token_id=a_ )
lowerCAmelCase__ = 10
# no processor list
lowerCAmelCase__ = temp_dist_warp(a_ ,a_ ,cur_len=a_ )
lowerCAmelCase__ = top_k_warp(a_ ,a_ ,cur_len=a_ )
lowerCAmelCase__ = top_p_warp(a_ ,a_ ,cur_len=a_ )
lowerCAmelCase__ = min_dist_proc(a_ ,a_ ,cur_len=a_ )
lowerCAmelCase__ = bos_dist_proc(a_ ,a_ ,cur_len=a_ )
lowerCAmelCase__ = eos_dist_proc(a_ ,a_ ,cur_len=a_ )
# with processor list
lowerCAmelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase__ = processor(a_ ,a_ ,cur_len=a_ )
# scores should be equal
self.assertTrue(jnp.allclose(a_ ,a_ ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 4
lowerCAmelCase__ = 10
lowerCAmelCase__ = 15
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
lowerCAmelCase__ = 15
# dummy input_ids and scores
lowerCAmelCase__ = ids_tensor((batch_size, sequence_length) ,a_ )
lowerCAmelCase__ = input_ids.copy()
lowerCAmelCase__ = self._get_uniform_logits(a_ ,a_ )
lowerCAmelCase__ = scores.copy()
# instantiate all dist processors
lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase__ = FlaxTopKLogitsWarper(3 )
lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=a_ )
lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ ,eos_token_id=a_ )
lowerCAmelCase__ = 10
# no processor list
        def run_no_processor_list(input_ids , scores , cur_len):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
# with processor list
        def run_processor_list(input_ids , scores , cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids ,scores ,cur_len )
        scores_comp = jitted_run_processor_list(input_ids ,scores_comp ,cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores ,scores_comp ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
| 604
| 1
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
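# Issues carrying any of the labels above are exempt from the stale/auto-close sweep below.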
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 525
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class WavaVecaFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=1 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
def _flatten(a_ ):
return list(itertools.chain(*a_ ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , a_ ):
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowerCAmelCase : Any = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase : Any = feat_extract(a_ , return_tensors="np" ).input_values
lowerCAmelCase : int = feat_extract(a_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase : Tuple = np.asarray(a_ )
lowerCAmelCase : int = feat_extract(a_ , return_tensors="np" ).input_values
lowerCAmelCase : Union[str, Any] = feat_extract(a_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def _lowerCamelCase ( self ):
lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Union[str, Any] = ["longest", "max_length", "do_not_pad"]
lowerCAmelCase : Optional[int] = [None, 1_600, None]
for max_length, padding in zip(a_ , a_ ):
lowerCAmelCase : List[Any] = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors="np" )
lowerCAmelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self.assertTrue(input_values[1][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Optional[int] = range(800 , 1_400 , 200 )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase : str = ["longest", "max_length", "do_not_pad"]
lowerCAmelCase : Any = [None, 1_600, None]
for max_length, padding in zip(a_ , a_ ):
lowerCAmelCase : Dict = feat_extract(a_ , max_length=a_ , padding=a_ )
lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Dict = feat_extract(
a_ , truncation=a_ , max_length=1_000 , padding="max_length" , return_tensors="np" )
lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Union[str, Any] = feat_extract(
a_ , truncation=a_ , max_length=1_000 , padding="longest" , return_tensors="np" )
lowerCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Union[str, Any] = feat_extract(
a_ , truncation=a_ , max_length=2_000 , padding="longest" , return_tensors="np" )
lowerCAmelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def _lowerCamelCase ( self ):
import torch
lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : List[Any] = np.random.rand(100 ).astype(np.floataa )
lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase : Union[str, Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase : Tuple = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def _lowerCamelCase ( self ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(a_ )
lowerCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(a_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
| 525
| 1
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class A ( TestCasePlus ):
'''simple docstring'''
    def run_seqaseq_quick(self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=1_2 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"] , float )
            assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def a_ (self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def a_ (self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def a_ (self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ (self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ (self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ (self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=A__ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ (self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def a_ (self ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A__ , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def a_ (self , experiment_id ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data["extra_args_str"] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data["n_matches"] )
@slow
def a_ (self ) -> Any:
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=1_2_8 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def a_ (self ) -> Dict:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim ) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=1_2_8 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , "trainer_state.json" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**2_0 )
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**2_0 )
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
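        # Run the same short training twice, once with stock AdamW and once with 8-bit AdamW
        # from bitsandbytes, then compare allocated/peak GPU memory and the final loss.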
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 1_2_0  # MB; see the calculation in the comment above
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
    def run_trainer(self , max_len , model_name , num_train_epochs , learning_rate = 3E-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n    --model_name_or_path {model_name}\n    --train_file {data_dir}/train.json\n    --validation_file {data_dir}/val.json\n    --test_file {data_dir}/test.json\n    --output_dir {output_dir}\n    --overwrite_output_dir\n    --max_train_samples 8\n    --max_source_length {max_len}\n    --max_target_length {max_len}\n    --do_train\n    --num_train_epochs {str(num_train_epochs )}\n    --per_device_train_batch_size 4\n    --learning_rate {learning_rate}\n    --warmup_steps 8\n    --logging_steps 0\n    --logging_strategy no\n    --save_steps {str(eval_steps )}\n    --group_by_length\n    --label_smoothing_factor 0.1\n    --target_lang ro_RO\n    --source_lang en_XX\n    ".split()
        args_eval = f"\n    --do_eval\n    --per_device_eval_batch_size 4\n    --max_eval_samples 8\n    --val_max_target_length {max_len}\n    --evaluation_strategy steps\n    --eval_steps {str(eval_steps )}\n    ".split()
        args_predict = "\n    --do_predict\n    ".split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n    -m torch.distributed.run\n    --nproc_per_node={n_gpus_to_use}\n    --master_port={master_port}\n    {self.examples_dir_str}/pytorch/translation/run_translation.py\n    ".split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys , "argv" , testargs ):
main()
return output_dir
| 700
|
'''simple docstring'''
def hex_to_bin( hex_num ):
    '''
    Convert a hexadecimal string to its binary representation, returned as an int.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-1a")
    -11010
    '''
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function" )
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("Invalid value was passed to the function" )
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 399
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _a ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''fnet'''
    def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 451
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class _a ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''camembert'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _a ( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 451
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , use_stable_embedding=lowerCamelCase , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
'''simple docstring'''
__a = OpenLlamaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
'''simple docstring'''
__a = True
__a = OpenLlamaModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
'''simple docstring'''
__a = OpenLlamaForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
'''simple docstring'''
__a = True
__a = True
__a = OpenLlamaForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )['hidden_states'][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )['hidden_states'][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes =(OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping =(
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking =False
    test_pruning =False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*lowerCamelCase )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict['input_ids']
__a = input_ids.ne(1 ).to(lowerCamelCase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = 'single_label_classification'
__a = input_dict['input_ids']
__a = input_ids.ne(1 ).to(lowerCamelCase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = 'multi_label_classification'
__a = input_dict['input_ids']
__a = input_ids.ne(1 ).to(lowerCamelCase )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = OpenLlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def __UpperCamelCase ( self , lowerCamelCase ) ->int:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ids_tensor([1, 10] , config.vocab_size )
__a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = OpenLlamaModel(lowerCamelCase )
original_model.to(lowerCamelCase )
original_model.eval()
__a = original_model(lowerCamelCase ).last_hidden_state
__a = original_model(lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = {'type': scaling_type, 'factor': 10.0}
__a = OpenLlamaModel(lowerCamelCase )
scaled_model.to(lowerCamelCase )
scaled_model.eval()
__a = scaled_model(lowerCamelCase ).last_hidden_state
__a = scaled_model(lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) )
| 270
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
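# Import structure consumed by _LazyModule below: the torch-backed modules are only
# loaded when one of their symbols is first accessed.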
__UpperCamelCase : Tuple = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __UpperCamelCase["modeling_blip_2"] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
    __UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], __UpperCamelCase, module_spec=__spec__)
| 270
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
"""simple docstring"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = ConvBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=lowerCamelCase, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
'''simple docstring'''
lowercase__ = TFConvBertModel(config=lowerCamelCase )
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
'''simple docstring'''
lowercase__ = TFConvBertForMaskedLM(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFConvBertForSequenceClassification(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = TFConvBertForMultipleChoice(config=lowerCamelCase )
lowercase__ = tf.tile(tf.expand_dims(lowerCamelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(lowerCamelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(lowerCamelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFConvBertForTokenClassification(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
'''simple docstring'''
lowercase__ = TFConvBertForQuestionAnswering(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
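

# Hedged usage sketch (editor's addition, not part of the test suite): the same
# kind of forward pass the integration test above exercises, as a guarded demo.
# The checkpoint name comes from the tests; using AutoTokenizer for this
# checkpoint is an assumption.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    demo_model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    encoded = demo_tokenizer("ConvBERT mixes convolution into self-attention.", return_tensors="tf")
    print(demo_model(encoded).last_hidden_state.shape)  # (1, seq_len, 768)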
| 183
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )

@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)

@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 183
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n  Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n  Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n  Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n  Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n  Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n  Kern, Robert and Larson, Eric and Carey, C J and\n  Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n  {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n  Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n  Harris, Charles R. and Archibald, Anne M. and\n  Ribeiro, Antonio H. and Pedregosa, Fabian and\n  {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n  Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
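

if __name__ == "__main__":
    # Editor's addition for illustration: ``_compute`` is a thin wrapper around
    # ``scipy.stats.pearsonr``, which returns ``(correlation, p_value)``.
    corr, p_value = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print({"pearsonr": float(corr), "p-value": float(p_value)})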
| 216
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
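

# Editor's note: because Node implements __lt__ on f_cost, the plain
# list.sort() calls below order the open lists cheapest-first, which is all
# the bookkeeping this implementation needs instead of a priority queue.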
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the valid, unblocked neighbours of ``parent`` on the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from the goal back to the start via parent pointers."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # the search call was missing from the timed block
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 216
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if ``number`` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 314
|
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Given an even-length matrix, returns its 4 quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
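

# Editor's note: actual_strassen performs 7 recursive multiplications instead
# of the naive 8, so T(n) = 7*T(n/2) + O(n^2), i.e. O(n^log2(7)) ~ O(n^2.81).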
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 314
| 1
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
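

# Editor's note: solution() enumerates primitive Pythagorean triples with
# Euclid's formula; e.g. euclid_m=2, euclid_n=1 yields the (3, 4, 5) triangle,
# whose primitive perimeter is 2 * 2 * (2 + 1) = 12.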
if __name__ == "__main__":
print(f'''{solution() = }''')
| 554
|
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: outputs 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1

if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 554
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
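

if __name__ == "__main__":
    # Hedged usage sketch (editor's addition): wiring BlipProcessor up by hand.
    # The checkpoint name and image URL are assumptions for illustration; any
    # BLIP checkpoint with a BERT tokenizer should behave the same way.
    import requests
    from PIL import Image

    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="a photography of", return_tensors="pt")
    print(sorted(inputs.keys()))  # input_ids, attention_mask and pixel_values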
| 594
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
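

if __name__ == "__main__":
    # Editor's addition: a minimal sketch of the custom-timesteps API the last
    # tests exercise. Timesteps must be strictly descending; each entry's
    # previous timestep is the next entry in the list (or -1 at the end).
    demo_scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    print(demo_scheduler.timesteps)              # tensor([100,  87,  50,   1,   0])
    print(demo_scheduler.previous_timestep(50))  # tensor(1)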
| 594
| 1
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log verbosity, if TF is installed
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 720
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__lowercase : str =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 550
| 0
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
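

def print_reverse_iterative(head_node: Node) -> None:
    """Editor's addition: an iterative alternative to print_reverse that avoids
    Python's default recursion-depth limit (~1000 frames) on very long lists."""
    items = []
    while head_node is not None:
        items.append(head_node.data)
        head_node = head_node.next
    for data in reversed(items):
        print(data)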
def main():
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 426
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
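

if __name__ == "__main__":
    # Hedged usage sketch (editor's addition): joint text/image preprocessing
    # for CLIP. The checkpoint name and image URL are the standard public
    # examples and are assumptions here, not part of this module.
    import requests
    from PIL import Image

    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    batch = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
    )
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']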
| 286
| 0
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(
_UpperCAmelCase
) : Optional[int] = config_and_inputs
_UpperCAmelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
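
# --- Usage sketch (not part of the test suite): the distilled DeiT checkpoint
# exercised above can also be driven through the high-level pipeline API; the
# image path reuses the COCO fixture from prepare_img().
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="facebook/deit-base-distilled-patch16-224")
    predictions = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
    print(predictions[0])  # highest-scoring label and score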
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
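
# --- Illustration (hypothetical, standalone): the _LazyModule wiring above defers
# importing the torch-heavy modeling submodule until an exported name is first
# accessed, so a bare `import transformers.models.mra` stays cheap. A minimal
# sketch of the same pattern:
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve attributes by importing the owning submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")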
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_embeddings(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
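
# --- Usage sketch: encoding a question outside the test harness with the same
# checkpoint the integration test uses. The matching tokenizer class is assumed
# to be DPRQuestionEncoderTokenizer from the public transformers API.
if __name__ == "__main__":
    from transformers import DPRQuestionEncoderTokenizer

    tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    question = tokenizer("hello, is my dog cute?", return_tensors="tf")
    embedding = encoder(**question).pooler_output  # shape (1, 768)
    print(embedding.shape)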
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
__lowercase = parser.parse_args()
get_runner_status(args.target_runners, args.token)
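
# --- Example invocation (hypothetical runner names and token):
#   python get_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>
# A non-empty offline list raises ValueError and also writes offline_runners.txt
# for the Slack report mentioned above.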
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Placeholder kept from the original module; no doctests yet."""
if __name__ == "__main__":
import doctest
doctest.testmod()
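
# --- Usage sketch for build_menu/greedy above (illustrative numbers): greedily
# pick items by value under a weight budget of 60.
if __name__ == "__main__":
    names = ["Burger", "Pizza", "Coke", "Rice"]
    values = [80, 100, 30, 40]
    weights = [40, 60, 10, 30]
    menu = build_menu(names, values, weights)
    chosen, total_value = greedy(menu, 60, Things.get_value)
    print(chosen, total_value)  # [Things(Pizza, 100, 60)] 100.0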
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretokenized_inputs(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
__lowercase = {"""input_ids""": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e", )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
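
# --- Usage sketch: EN->FR generation with the checkpoint exercised above. The
# seq2seq class M2M100ForConditionalGeneration comes from the public API;
# forcing the target language id mirrors the forced_bos_token_id check above.
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    encoded = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))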
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
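
# --- Migration sketch: code that constructed the deprecated class above can
# switch to the replacement directly ("deepmind/vision-perceiver-conv" is only
# an example checkpoint):
#
#     from transformers import PerceiverImageProcessor
#     image_processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")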
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
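
# --- Usage sketch: the class above is normally reached through the pipeline
# factory; "facebook/detr-resnet-50" is an example object-detection checkpoint.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
        print(prediction["label"], prediction["score"], prediction["box"])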
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    """Classical fourth-order Runge-Kutta integrator for dy/dx = f(x, y)."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
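
# --- Worked example: dy/dx = y with y(0) = 1, integrated to x = 1. With
# h = 0.01 the RK4 estimate should match the exact value e ≈ 2.71828 closely.
if __name__ == "__main__":
    approximation = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approximation[-1])  # ~2.7182818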
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """In-place shuffle via repeated random index swaps."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
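
# --- Note: the shuffle mutates its argument in place and uses the global random
# state, so seeding makes a run reproducible:
if __name__ == "__main__":
    random.seed(0)
    print("Seeded FY Shuffle", fisher_yates_shuffle(list(range(8))))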
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer,
    label_column_id,
    max_seq_length=None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)
return results
if __name__ == "__main__":
main()
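
# --- Example invocation (hypothetical CSV paths; the files need a label column
# plus one or two text columns, matching get_tfds above):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --output_dir ./out --do_train --do_eval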
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function, starting_point, variable="x", precision=10**-10, multiplicity=1):
    """Find a root of `function` (a sympy-parsable string) by Newton-Raphson iteration."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}",
)
# Find root of cos(x)
print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Return the minimal three-way (up/down/right) path sum of the matrix in `filename`."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'{solution() = }')
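
# --- Sanity check against Project Euler problem 82's published 5x5 example,
# whose minimal three-way path sum is 994. os.path.join discards its first
# component when the second is absolute, so solution() accepts the temp path.
if __name__ == "__main__":
    import tempfile

    example = (
        "131,673,234,103,18\n"
        "201,96,342,965,150\n"
        "630,803,746,422,111\n"
        "537,699,497,121,956\n"
        "805,732,524,37,331\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(example)
    print(solution(tmp.name))  # expected: 994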
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[str] =False
SCREAMING_SNAKE_CASE_ : Optional[Any] =False
SCREAMING_SNAKE_CASE_ : Dict =False
SCREAMING_SNAKE_CASE_ : Dict =False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='AST does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
            if is_torchaudio_available()
            else None
        )
@slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593').to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
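    # --- Added usage sketch (not part of the original test suite) ---
    # A minimal inference sketch mirroring the integration test above; it assumes
    # torchaudio, network access to the MIT checkpoint, and the `prepare_audio`
    # helper defined earlier, and maps the top logit to a label via `id2label`.
    #
    #     feature_extractor = ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
    #     model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
    #     audio, sampling_rate = prepare_audio()
    #     inputs = feature_extractor(audio.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors='pt')
    #     with torch.no_grad():
    #         logits = model(**inputs).logits
    #     print(model.config.id2label[logits.argmax(-1).item()])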
| 434
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''')
        if self.dropout >= 0.4:
            raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''')

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list() -> tuple:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
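# --- Added usage sketch (illustrative, not from the original file) ---
# Shows how the nested configs above compose; `vocab_size=33` is an arbitrary
# example value, not tied to any released checkpoint.
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True)
#     # EsmFoldConfig -> TrunkConfig -> StructureModuleConfig defaults are filled in:
#     print(config.to_dict()["esmfold_config"]["trunk"]["structure_module"]["sequence_dim"])  # 384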
| 55
|
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
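
    # --- Added usage sketch: two parties derive the same shared secret ---
    # (private keys are freshly random on every run, so only equality is checked)
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared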
| 11
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 3_2, 3_2, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_5_6, 2_5_6, 3)
        expected_slice = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 41
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    '''simple docstring'''
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 335
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Convert a PyTorch BertModel state dict into a TF1 checkpoint."""
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''
    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
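# --- Added usage note (hypothetical script name and paths, matching the flags above) ---
# python convert_bert_pytorch_checkpoint_to_original_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt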
| 372
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 372
| 1
|
"""simple docstring"""
def optimal_merge_pattern(files) -> float:
    """Merge the files greedily, two cheapest at a time, and return the optimal cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
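
    # --- Added worked example ---
    # Greedily merging [2, 3, 4]: 2+3 -> 5 (cost 5), then 5+4 -> 9 (cost 9); total 14.
    print(optimal_merge_pattern([2, 3, 4]))  # 14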
| 77
|
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 2_6

def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    flag = [False] * 2_6
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord('a')] = True
        elif char.isupper():
            flag[ord(char) - ord('A')] = True
    return all(flag)

def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 2_6

def benchmark() -> None:
    from timeit import timeit
    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 446
| 0
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, F'''state_{starting_epoch-1}.json'''), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = F'''epoch_{epoch}'''
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(F'''epoch {epoch}:''', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, F'''state_{epoch}.json'''), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
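# --- Added usage note (hypothetical invocation; the script filename is an assumption) ---
# With accelerate (and optionally a DeepSpeed config) already set up via `accelerate config`:
#   accelerate launch checkpointing.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0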
| 716
|
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float('''nan''')
class Tee:
    """A helper class to tee print's output into a file."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(F'''{key}={val}''')
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += F'''{cmd.pop(0)} '''
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += F''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep
        sleep(0)
        return dict(
            {k: random.uniform(0, 1_00) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222])}, )
    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)
    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / F'''log.{prefix}.stdout.txt''', "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / F'''log.{prefix}.stderr.txt''', "w") as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}
    with io.open(F'''{output_dir}/all_results.json''', "r", encoding="utf-8") as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ):
    results = []
    metrics = []
    preamble = F'''{id}: {variation:<{longest_variation_len}}'''
    outcome = F'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'''\33[2K\r{outcome}'''
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = F'''{outcome} {mean_target}'''
        if len(results) > 1:
            results_str += F''' {tuple(round(x, 2) for x in results)}'''
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return F'''
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0, axis="columns", )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols
    # capitalize
    df = df.rename(str.capitalize, axis="columns")
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")
    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]
    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)
    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''')
    print(F'''and this script\'s output is also piped into {report_fn}''')
    sys.stdout = Tee(report_fn)
    print(F'''\n*** Running {len(variations)} benchmarks:''')
    print(F'''Base command: {" ".join(base_cmd)}''')
    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ))
    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
| 680
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
EQUATORIAL_RADIUS = 6_3_7_8_1_3_7
def A ( __snake_case: float , __snake_case: float , __snake_case: float , __snake_case: float ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__magic_name__ = atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
__magic_name__ = atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__magic_name__ = haversine_distance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__magic_name__ = (b_lata + b_lata) / 2
__magic_name__ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__magic_name__ = (sin(_SCREAMING_SNAKE_CASE ) ** 2) * (cos(_SCREAMING_SNAKE_CASE ) ** 2)
__magic_name__ = cos(sigma / 2 ) ** 2
__magic_name__ = (sigma - sin(_SCREAMING_SNAKE_CASE )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__magic_name__ = (cos(_SCREAMING_SNAKE_CASE ) ** 2) * (sin(_SCREAMING_SNAKE_CASE ) ** 2)
__magic_name__ = sin(sigma / 2 ) ** 2
__magic_name__ = (sigma + sin(_SCREAMING_SNAKE_CASE )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
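

# Usage sketch added for illustration (coordinates are examples, not from the original
# module); the result is in meters and, for points this close together, should agree
# with the spherical haversine distance to within a fraction of a percent.
def _example_distance() -> float:
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    return lamberts_ellipsoidal_distance(*san_francisco, *yosemite)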
if __name__ == "__main__":
import doctest
doctest.testmod()
| 545
|
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division by every integer from 2 up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below ``max_prime`` among the differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
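

# Note added for clarity: the candidates 7, 19, 37, 61, ... are differences of
# consecutive cubes, (n + 1) ** 3 - n ** 3 == 3 * n * n + 3 * n + 1, which the loop
# above generates incrementally by adding 6 * cube_index at each step.
def _first_cube_differences(count: int = 4) -> list:
    return [(n + 1) ** 3 - n**3 for n in range(1, count + 1)]  # [7, 19, 37, 61]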
if __name__ == "__main__":
print(f'''{solution() = }''')
| 507
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = 0.9 , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = 1 / 255 , _a = True , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_a )
lowerCAmelCase_ = size if size is not None else {"shortest_edge": 224}
lowerCAmelCase_ = get_size_dict(_a , default_to_square=_a )
lowerCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCAmelCase_ = get_size_dict(_a , param_name="crop_size" )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = crop_pct
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_center_crop
lowerCAmelCase_ = crop_size
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __a ( self , _a , _a , _a = None , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
lowerCAmelCase_ = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCAmelCase_ = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCAmelCase_ = int(size["height"] / crop_pct )
else:
lowerCAmelCase_ = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(_a ) )
lowerCAmelCase_ = get_resize_output_image_size(_a , size=_a , default_to_square=_a )
else:
if "shortest_edge" in size:
lowerCAmelCase_ = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a )
elif "height" in size and "width" in size:
lowerCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(_a ) )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __a ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
lowerCAmelCase_ = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(_a , size=(size["height"], size["width"]) , data_format=_a , **_a )
    def __a ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(_a , default_to_square=_a )
lowerCAmelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ = get_size_dict(_a , param_name="crop_size" )
lowerCAmelCase_ = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(_a ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images]
if do_center_crop:
lowerCAmelCase_ = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(_a , _a ) for image in images]
lowerCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_a , tensor_type=_a )
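

# Usage sketch added for illustration (defaults give a 224x224 center crop in
# channels-first layout):
#   processor = PoolFormerImageProcessor()
#   batch = processor(images=[pil_image], return_tensors="np")
#   batch["pixel_values"].shape   # (1, 3, 224, 224)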
| 715
|
def A(__a: int ):
lowerCAmelCase_ = abs(__a )
lowerCAmelCase_ = 0
while n > 0:
res += n % 10
n //= 10
return res
def A(__a: int ):
lowerCAmelCase_ = abs(__a )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def A(__a: int ):
return sum(int(__a ) for c in str(abs(__a ) ) )
def A():
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__a: Callable , __a: int ) -> None:
lowerCAmelCase_ = F"{func.__name__}({value})"
lowerCAmelCase_ = timeit(F"__main__.{call}" , setup="import __main__" )
print(F"{call:56} = {func(__a )} -- {timing:.4f} seconds" )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__a , __a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 226
| 0
|
import requests
lowerCAmelCase : Optional[Any] = '''''' # <-- Put your OpenWeatherMap appid here!
lowerCAmelCase : int = '''https://api.openweathermap.org/data/2.5/'''
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """https://openweathermap.org/current"""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """https://openweathermap.org/forecast5"""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """https://openweathermap.org/api/one-call-api"""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
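

# Note added for clarity: locals() at the top of each function is exactly the
# parameter mapping, so the parameter names double as the query-string keys the
# OpenWeatherMap API expects (e.g. ?q=Chicago&appid=...); renaming a parameter
# would silently change the request.
def _query_params_demo() -> dict:
    q, appid = "Chicago", "demo-key"  # illustrative values only
    return locals()  # {'q': 'Chicago', 'appid': 'demo-key'}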
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCAmelCase : List[Any] = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 214
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training model."""
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
UpperCamelCase__ : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
UpperCamelCase__ : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
UpperCamelCase__ : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
UpperCamelCase__ : Optional[int] = field(
default=1_0_0_0_0 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    UpperCamelCase__ : Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate for training.'''} )
    UpperCamelCase__ : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate schedule type.'''} )
UpperCamelCase__ : Optional[int] = field(
default=7_5_0 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
UpperCamelCase__ : Optional[int] = field(
default=1_6 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
UpperCamelCase__ : Optional[bool] = field(
default=__a , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
UpperCamelCase__ : Optional[int] = field(default=5_0_0_0_0 , metadata={'''help''': '''Maximum number of training steps.'''} )
UpperCamelCase__ : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
UpperCamelCase__ : Optional[int] = field(default=1_0_2_4 , metadata={'''help''': '''Sequence lengths used for training.'''} )
UpperCamelCase__ : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
UpperCamelCase__ : Optional[int] = field(
default=1_0_2_4 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
UpperCamelCase__ : Optional[str] = field(
default=__a , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
UpperCamelCase__ : Optional[bool] = field(default=__a , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class EvaluationArguments:
    """Configuration for evaluating model."""
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
UpperCamelCase__ : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
UpperCamelCase__ : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
UpperCamelCase__ : Optional[int] = field(default=1_0_2_4 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
UpperCamelCase__ : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on HumanEval dataset."""
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
UpperCamelCase__ : Optional[int] = field(default=__a , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
UpperCamelCase__ : Optional[int] = field(
default=__a , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
UpperCamelCase__ : Optional[bool] = field(
default=__a , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
UpperCamelCase__ : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
UpperCamelCase__ : Optional[int] = field(default=2_5_6 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
UpperCamelCase__ : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
UpperCamelCase__ : Optional[float] = field(default=0.9_5 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
UpperCamelCase__ : Optional[int] = field(default=1_0 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
UpperCamelCase__ : Optional[int] = field(
default=2_0_0 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
UpperCamelCase__ : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
UpperCamelCase__ : Optional[str] = field(
        default='''eval_results.json''' , metadata={'''help''': '''Name of the file where evaluation results are stored.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
UpperCamelCase__ : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing data."""
UpperCamelCase__ : Optional[int] = field(
default=__a , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
UpperCamelCase__ : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
UpperCamelCase__ : Optional[str] = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed dataset.'''} )
UpperCamelCase__ : Optional[int] = field(
default=1_0_0_0_0_0 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
UpperCamelCase__ : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
UpperCamelCase__ : Optional[float] = field(
default=1_0_0_0 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
UpperCamelCase__ : Optional[float] = field(
default=1_0_0 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.2_5 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
UpperCamelCase__ : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
UpperCamelCase__ : Optional[bool] = field(
default=__a , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.8_5 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""
UpperCamelCase__ : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
UpperCamelCase__ : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
UpperCamelCase__ : Optional[int] = field(default=2_0_0_0_0_0 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
UpperCamelCase__ : Optional[int] = field(
        default=32768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
UpperCamelCase__ : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
UpperCamelCase__ : Optional[bool] = field(default=__a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
UpperCamelCase__ : Optional[int] = field(default=__a , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class InitializationArguments:
    """Configuration for initializing new model."""
UpperCamelCase__ : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
UpperCamelCase__ : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
UpperCamelCase__ : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
UpperCamelCase__ : Optional[bool] = field(default=__a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
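

# Usage sketch (standard pattern for these dataclasses; the exact flag names depend
# on the field names, which are obfuscated in this dump):
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args()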
| 214
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : List[Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT MSN (masked siamese network) vision transformer."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
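

# Usage sketch (standard PretrainedConfig round-trip; the directory name is illustrative):
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   config.save_pretrained("./vit-msn-config")
#   config = ViTMSNConfig.from_pretrained("./vit-msn-config")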
| 714
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
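

# Minimal usage sketch mirroring the slow test above (model id taken from that test):
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to("cuda")
#   image = pipe(num_inference_steps=50, output_type="numpy").images[0]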
| 500
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : int = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
])
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
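

# Invocation sketch (the checkpoint URL is the script's own default; the script file
# name and dump folder are illustrative):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten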
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
snake_case_ : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 691
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
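

# Launch sketch (task name and paths are illustrative; the flags map onto the
# dataclasses defined above):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --output_dir ./out --do_train --do_eval --max_seq_length 80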
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 691
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
A = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
A = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
A = tf.concat([input_ids, eos_tensor] ,axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,attention_window=self.attention_window ,**self.config_updates ,)
A = prepare_led_inputs_dict(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
A = tf.concat(
[tf.zeros_like(lowerCamelCase_ )[:, :-1], tf.ones_like(lowerCamelCase_ )[:, -1:]] ,axis=-1 ,)
A = global_attention_mask
return config, inputs_dict
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Union[str, Any]:
A = TFLEDModel(config=lowerCamelCase_ ).get_decoder()
A = inputs_dict["""input_ids"""]
A = input_ids[:1, :]
A = inputs_dict["""attention_mask"""][:1, :]
A = 1
# first forward pass
A = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,use_cache=lowerCamelCase_ )
A , A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
A = tf.concat([input_ids, next_tokens] ,axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
A = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0]
A = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build the standard input dict for the TF-LED tests, filling in default masks."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def UpperCamelCase__ ( self ) -> List[Any]:
A = TFLEDModelTester(self )
A = ConfigTester(self ,config_class=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> List[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = tf.zeros_like(inputs_dict["""attention_mask"""] )
A = 2
A = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,1 ,inputs_dict["""global_attention_mask"""] ,)
A = True
A = self.model_tester.seq_length
A = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCamelCase_ ):
A = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,)
def check_encoder_attentions_output(lowerCamelCase_ ):
A = [t.numpy() for t in outputs.encoder_attentions]
A = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers )
self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,)
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,)
for model_class in self.all_model_classes:
A = True
A = False
A = False
A = model_class(lowerCamelCase_ )
A = model(self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
A = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states ,lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
A = model_class(lowerCamelCase_ )
A = model(self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states ,lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A = True
A = model_class(lowerCamelCase_ )
A = model(self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states ,lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(lowerCamelCase_ )
A = model(self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states ,lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
def UpperCamelCase__ ( self ) -> Optional[int]:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
A = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
A = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
A = prepare_led_inputs_dict(model.config ,lowerCamelCase_ ,lowerCamelCase_ )
A = model(**lowerCamelCase_ )[0]
A = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape ,lowerCamelCase_ )
# change to expected output here
A = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] ,)
tf.debugging.assert_near(output[:, :3, :3] ,lowerCamelCase_ ,atol=1E-3 )
def UpperCamelCase__ ( self ) -> str:
A = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
A = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
A = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
A = prepare_led_inputs_dict(model.config ,lowerCamelCase_ ,lowerCamelCase_ )
A = model(**lowerCamelCase_ )[0]
A = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape ,lowerCamelCase_ )
# change to expected output here
A = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] ,)
tf.debugging.assert_near(output[:, :3, :3] ,lowerCamelCase_ ,atol=1E-3 ,rtol=1E-3 )
| 721
|
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm; the loop invariant keeps gcd(a, b) unchanged."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return x with (a * x) % m == 1 via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
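

# Quick check added for illustration: the inverse of 7 modulo 26 is 15,
# since 7 * 15 == 105 == 4 * 26 + 1.
def _example() -> int:
    return find_mod_inverse(7, 26)  # -> 15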
| 255
| 0
|
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in ``s`` at which ``pattern`` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
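

# Complexity note added for clarity: the scan above is O(len(s) * len(pattern)) in the
# worst case, because overlapping prefixes are re-compared (KMP-style algorithms skip them).
def _worst_case_demo() -> list:
    return naive_pattern_search("a" * 20 + "b", "a" * 10 + "b")  # -> [10]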
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 396
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_UpperCamelCase : str = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
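

def _deprecation_demo() -> bool:
    # Illustration only (not part of the original module): the shim emits a
    # FutureWarning on construction but otherwise behaves exactly like
    # OwlViTImageProcessor.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OwlViTFeatureExtractor()
        return any(issubclass(w.category, FutureWarning) for w in caught)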
| 396
| 1
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (plus tokenizer) built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
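

# CLI sketch (file name and flags are illustrative): fire.Fire maps positional
# arguments to config_name / save_dir and keyword flags to **config_kwargs, e.g.
#   python save_randomly_initialized.py t5-small ./t5-small-random --d_model=64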
| 721
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """The output of UNet1DModel: a `sample` tensor of shape (batch, channels, length)."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns the processed sample."""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
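

# Usage sketch (shapes illustrative, defaults from the config above): the model maps
# a (batch, channels, length) sample plus a timestep to an output of the same shape:
#   model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   out = model(torch.randn(1, 2, 65536), timestep=10).sample  # same shape as the input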
| 694
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 65
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Tuple ):
'''simple docstring'''
inspect_dataset(A__ , A__ )
lowerCAmelCase_ : Any = path + """.py"""
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def UpperCamelCase_ ( A__ : Tuple , A__ : str ):
'''simple docstring'''
inspect_metric(A__ , A__ )
lowerCAmelCase_ : Any = path + """.py"""
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
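# Quick interactive check of the helpers exercised above (values match the
# parametrized cases):
#
#   >>> from datasets import get_dataset_split_names
#   >>> get_dataset_split_names("squad", config_name="plain_text")
#   ['train', 'validation']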
| 275
| 0
|
def solution(n: int = 100) -> int:
    """Project Euler 29: count distinct terms of a**b for 2 <= a, b <= n."""
    collect_powers = set()
    n = n + 1  # maximum limit (exclusive) for the ranges below
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
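# Worked example: for n = 5 there are 16 combinations of a**b (2 <= a, b <= 5)
# but only 15 distinct values, since 4**2 duplicates 2**4, so solution(5) == 15.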
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 379
|
from __future__ import annotations
def average(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
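# Example:
#   >>> average([1, 2, 3, 4])
#   2.5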
if __name__ == "__main__":
import doctest
doctest.testmod()
| 379
| 1
|
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate an untrained seq2seq model from a config and save it with its tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
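# Example CLI invocation via fire (script name and paths are hypothetical):
#   python save_randomly_initialized_version.py t5-small /tmp/t5-random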
| 9
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
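# Note: with this _LazyModule pattern, `from transformers.models.vit import ViTModel`
# only imports the heavy backend code on first attribute access; the try/except
# blocks above simply skip registering symbols whose optional backend is missing.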
| 40
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 668
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 668
| 1
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property of a digit tuple."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum every 0-to-9 pandigital number with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
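# Example from the Project Euler 43 statement: 1406357289 is 0-9 pandigital and
# satisfies every check above (406/2, 63/3, 635/5, 357/7, 572/11, 728/13, 289/17).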
if __name__ == "__main__":
print(f'''{solution() = }''')
| 79
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 442
| 0
|
def odd_even_transposition(arr: list) -> list:
    """Odd-even transposition (brick) sort: alternate odd/even passes of adjacent swaps."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
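# Worked example: [3, 1, 2] -> even pass swaps (3,1) giving [1, 3, 2];
# the odd pass swaps (3,2) giving [1, 2, 3]; a third pass changes nothing.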
| 709
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 130
| 0
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
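# Example: rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
# (transpose gives [[1, 3], [2, 4]]; reversing the row order rotates 90 deg CCW).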
| 2
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """Compute the MinHash of a snippet's token set; return None if too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """Split a code string on non-alphanumeric characters into a token set."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index, attaching it to an existing cluster if close."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
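# A minimal usage sketch (assumes a code dataset with "content", "repo_name" and
# "path" columns; the dataset id below is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)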
| 11
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
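# A minimal usage sketch of the config above:
#   config = BitConfig(layer_type="bottleneck", global_padding="same")
#   config.global_padding  # normalized to "SAME" by the padding check in __init__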
| 611
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
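# Example launch (paths are hypothetical):
#   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
#       --train_file data/train.txt --train_ref_file data/train_ref.txt \
#       --do_train --output_dir /tmp/mlm-wwm-out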
| 611
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane using perspective division."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 51
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 434
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    """We will verify our results on an image of ADE20k."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
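# A hedged post-processing sketch (not from the original test file): turning UperNet logits
# into a per-pixel label map, the usual last step after the inference shown above. The
# checkpoint name repeats the one used in the tests; everything else is illustrative.
#
#     seg_logits = outputs.logits              # (batch, num_labels, height, width)
#     seg_map = seg_logits.argmax(dim=1)[0]    # (height, width) tensor of class indices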
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes an original LDM state dict and a config, and returns a converted diffusers checkpoint.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
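# Example invocation (the script name and all paths below are placeholders, not from the
# original file):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --dump_path ./converted_ldm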
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
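# A minimal usage sketch (assumed typical PipelineTool invocation; the input text and labels
# are illustrative, and the first call downloads facebook/bart-large-mnli):
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This movie was an absolute delight.", labels=["positive", "negative"]))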
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
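# Outside of this test harness, the same event-recording pattern looks like the hedged
# sketch below (model/dataset construction is elided; the callback sits second-to-last
# in the handler, as the tests above rely on):
#
#     trainer = Trainer(model, TrainingArguments("out"), train_dataset=ds,
#                       callbacks=[MyTestTrainerCallback])
#     trainer.train()
#     print(trainer.callback_handler.callbacks[-2].events)  # ordered list of fired events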
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optim_state_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optim_state_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
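# A hedged sketch of how these helpers pair up inside an accelerate training script.
# The plugin/accelerator objects come from `Accelerator(fsdp_plugin=...)`; the directory
# name and indices below are illustrative:
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)
#     ...
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")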
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
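# Illustration (hypothetical key, shown as a comment rather than executed code):
#   replace_key_with_offset("network.1.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "network.block.0.0.output.conv1.weight"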
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """We will verify our results on a COCO image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the model's weights to our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
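# A minimal usage sketch. The class name above is reconstructed from the LeViT-style
# (256/224) shortest-edge resize and is an assumption; the image path is illustrative:
#
#     from PIL import Image
#     processor = LevitImageProcessor()
#     inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])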
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
def a__ ( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple=None ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
snake_case_ : Union[str, Any] = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
snake_case_ : List[str] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
snake_case_ : Tuple = std.flatten()
while len(std.shape ) < len(score.shape ):
snake_case_ : Dict = std.unsqueeze(-1 )
snake_case_ : Tuple = -score / std
# compute
snake_case_ : Any = -1.0 / len(self.timesteps )
snake_case_ : Union[str, Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
snake_case_ : Any = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
snake_case_ : Optional[int] = beta_t.unsqueeze(-1 )
snake_case_ : List[str] = -0.5 * beta_t * x
snake_case_ : Tuple = torch.sqrt(_UpperCamelCase )
snake_case_ : Any = drift - diffusion**2 * score
snake_case_ : List[str] = x + drift * dt
# add noise
snake_case_ : List[Any] = randn_tensor(x.shape ,layout=x.layout ,generator=_UpperCamelCase ,device=x.device ,dtype=x.dtype )
snake_case_ : int = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
return self.config.num_train_timesteps
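# A rough predictor-loop sketch for the scheduler above. `score_model` is an
# assumed stand-in for any network returning a score estimate; it is not part
# of this file.
import torch

scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=1000)

x = torch.randn(1, 3, 32, 32)  # start from pure Gaussian noise
for t in scheduler.timesteps:
    score = score_model(x, t)  # assumption: returns a score tensor shaped like x
    x, x_mean = scheduler.step_pred(score, x, t)
# x_mean, the noise-free mean of the final step, is what one would typically keep.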
| 334
| 1
|
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` as tuples, using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
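# A quick worked example of the function above: three elements yield 3! = 6
# permutations, in the order Heap's algorithm visits them.
perms = heaps([1, 2, 3])
print(len(perms))  # 6
print(perms)       # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]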
| 211
|
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check via math.sqrt; rounding to int first avoids float-precision false negatives."""
    return int(math.sqrt(num)) ** 2 == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether ``n`` is a perfect square by binary-searching for its root."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
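# A short check contrasting the two implementations above. The binary-search
# variant stays exact for arbitrarily large integers because it never leaves
# integer arithmetic; both agree on these inputs.
for n in (0, 1, 2, 16, 26, 10**18):
    assert perfect_square(n) == perfect_square_binary_search(n)
print(perfect_square_binary_search(10**18))  # True, since 10**18 == (10**9) ** 2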
| 211
| 1
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences and rejoin them with newlines (used for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 39
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
) -> None:
    """Copy/paste/tweak the fairseq X-MOD weights into the transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code in xmod_layer.adapter_modules.keys():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
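# A hedged sketch of how the converted folder might be consumed afterwards.
# The local path is an assumption taken from --pytorch_dump_folder_path above.
import torch
from transformers import XLMRobertaTokenizer, XmodForMaskedLM

model = XmodForMaskedLM.from_pretrained("./converted_xmod")  # hypothetical output folder
model.roberta.set_default_language("en_XX")  # route inputs through the English adapter

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")  # X-MOD reuses the XLM-R vocabulary
inputs = tokenizer("Hello, World!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (batch, sequence_length, vocab_size)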
| 662
| 0
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
        mask_ratio=0.6, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    # ViTMAE applies random masking, so the noise is fixed to get identical
    # outputs for dict and keyword arguments.
    def test_keyword_and_dict_args(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
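# As a side note on the arithmetic the tester encodes: with ViT-MAE-base
# defaults (224px images, 16px patches, 75% masking), the encoder keeps
# ceil((1 - mask_ratio) * (num_patches + 1)) tokens, counting the [CLS] token.
import math

num_patches = (224 // 16) ** 2                               # 196
seq_length = int(math.ceil((1 - 0.75) * (num_patches + 1)))  # ceil(0.25 * 197) = 50
print(num_patches, seq_length)                               # 196 50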
| 703
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data"
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file"
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file"
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
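# For reference, a sketch of the record shape this script reads. The sample
# values are assumptions inferred from the field accesses above, not taken
# from the real biencoder-nq-dev.json file.
dpr_record = {
    "question": "who sings does he love me with reba",
    "positive_ctxs": [{"title": "Does He Love You"}, {"title": "Reba McEntire"}],
}
# evaluation_set would get the line:  who sings does he love me with reba
# gold_data_path would get the line:  Does He Love You\tReba McEntire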
| 386
| 0
|