| code (string, 87 – 55.2k chars) | code_codestyle (int64, 0 – 349) | style_context (string, 135 – 49.1k chars) | style_context_codestyle (int64, 0 – 349) | label (int64, 0 – 1) |
|---|---|---|---|---|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its serialized state disagrees
        # with the arguments passed at construction time.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
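# A minimal usage sketch; it assumes the "bert-base-uncased" checkpoint listed
# in the maps above is reachable, and the shown values are illustrative only:
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tokenizer("Hello world", "Second segment")
#   enc["input_ids"]       # [CLS] ... [SEP] ... [SEP]
#   enc["token_type_ids"]  # 0s for the first segment, 1s for the second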
| 101 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
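# Outside the test harness the processor is used the same way the tests use it;
# "Intel/dpt-large" is an assumed public checkpoint, any DPT checkpoint works:
#
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values  # (1, 3, H, W)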
| 272 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ["""OwlViTFeatureExtractor"""]
SCREAMING_SNAKE_CASE : Tuple = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
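# Sketch of what the lazy module buys: none of the submodules registered in
# `_import_structure` are imported until an attribute is first accessed, e.g.:
#
#   from transformers.models.owlvit import OwlViTProcessor  # imports processing_owlvit only now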
| 102 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # L2-normalize the rows of both embedding matrices, then take pairwise dot products.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 272 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 103 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 272 | 0 |
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
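# A minimal usage sketch, assuming the public "CompVis/ldm-celebahq-256"
# checkpoint (which bundles a VQModel, a UNet2DModel and a DDIMScheduler):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]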
| 104 | '''simple docstring'''
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet and Transformer-XL more context on short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
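# In practice this class is reached through the high-level factory; "gpt2" is
# just an example checkpoint, any causal-LM model id works:
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)
#   # -> [{"generated_text": "Hello, I'm a language model, ..."}]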
| 272 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : str = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 | '''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """For each index i, z_result[i] is the length of the longest substring
    starting at i that is also a prefix of the whole string."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    # Check whether the prefix match at position i can be extended by one character.
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
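# Worked examples (values computed by hand from the definitions above):
#
#   z_function("aaaa")                  # -> [0, 3, 2, 1]
#   find_pattern("abr", "abracadabra")  # -> 2 ("abr" starts at offsets 0 and 7)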
| 272 | 0 |
"""simple docstring"""
from __future__ import annotations
__UpperCamelCase : Union[str, Any] = tuple[int, int, int]
__UpperCamelCase : str = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__UpperCamelCase : List[str] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
__UpperCamelCase : List[Any] = '''EGZWVONAHDCLFQMSIPJBYUKXTR'''
__UpperCamelCase : Optional[int] = '''FOBHMDKEXQNRAULPGSJVTYICZW'''
__UpperCamelCase : Dict = '''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
__UpperCamelCase : List[Any] = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
__UpperCamelCase : Dict = '''RMDJXFUWGISLHVTCQNKYPBEZOA'''
__UpperCamelCase : int = '''SGLCPQWZHKXAREONTFBVIYJUDM'''
__UpperCamelCase : Union[str, Any] = '''HVSICLTYKQUBXDWAJZOMFGPREN'''
__UpperCamelCase : Any = '''RZWQHFMVDBKICJLNTUXAGYPSOE'''
__UpperCamelCase : List[str] = '''LFKIJODBEGAMQPXVUHYSTCZRWN'''
__UpperCamelCase : Optional[Any] = '''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(A_ ) )) < 3:
lowerCAmelCase__ : str = f'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(A_ )
# Checks if rotor positions are valid
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = rotpos
if not 0 < rotorposa <= len(A_ ):
lowerCAmelCase__ : Optional[int] = f'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(A_ )
if not 0 < rotorposa <= len(A_ ):
lowerCAmelCase__ : int = f'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(A_ )
if not 0 < rotorposa <= len(A_ ):
lowerCAmelCase__ : List[str] = f'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(A_ )
# Validates string and returns dict
lowerCAmelCase__ : List[Any] = _plugboard(A_ )
return rotpos, rotsel, pbdict
def __SCREAMING_SNAKE_CASE ( A_ ):
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = f'Plugboard setting isn\'t type string ({type(A_ )})'
raise TypeError(A_ )
elif len(A_ ) % 2 != 0:
lowerCAmelCase__ : List[Any] = f'Odd number of symbols ({len(A_ )})'
raise Exception(A_ )
elif pbstring == "":
return {}
pbstring.replace(''' ''' , '''''' )
# Checks if all characters are unique
lowerCAmelCase__ : Optional[int] = set()
for i in pbstring:
if i not in abc:
lowerCAmelCase__ : Dict = f'\'{i}\' not in list of symbols'
raise Exception(A_ )
elif i in tmppbl:
lowerCAmelCase__ : List[Any] = f'Duplicate symbol ({i})'
raise Exception(A_ )
else:
tmppbl.add(A_ )
del tmppbl
# Created the dictionary
lowerCAmelCase__ : Dict = {}
for j in range(0 , len(A_ ) - 1 , 2 ):
lowerCAmelCase__ : str = pbstring[j + 1]
lowerCAmelCase__ : Tuple = pbstring[j]
return pb
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ = (rotora, rotora, rotora) , A_ = "" , ):
lowerCAmelCase__ : List[str] = text.upper()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : str = _validator(
A_ , A_ , plugb.upper() )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = rotor_position
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCAmelCase__ : Optional[int] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCAmelCase__ : Optional[int] = plugboard[symbol]
# rotor ra --------------------------
lowerCAmelCase__ : Any = abc.index(A_ ) + rotorposa
lowerCAmelCase__ : Optional[int] = rotora[index % len(A_ )]
# rotor rb --------------------------
lowerCAmelCase__ : Any = abc.index(A_ ) + rotorposa
lowerCAmelCase__ : List[str] = rotora[index % len(A_ )]
# rotor rc --------------------------
lowerCAmelCase__ : str = abc.index(A_ ) + rotorposa
lowerCAmelCase__ : Dict = rotora[index % len(A_ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCAmelCase__ : List[str] = reflector[symbol]
# 2nd rotors
lowerCAmelCase__ : Any = abc[rotora.index(A_ ) - rotorposa]
lowerCAmelCase__ : str = abc[rotora.index(A_ ) - rotorposa]
lowerCAmelCase__ : Tuple = abc[rotora.index(A_ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCAmelCase__ : List[str] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(A_ ):
lowerCAmelCase__ : Any = 0
rotorposa += 1
if rotorposa >= len(A_ ):
lowerCAmelCase__ : Tuple = 0
rotorposa += 1
if rotorposa >= len(A_ ):
lowerCAmelCase__ : Dict = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(A_ )
return "".join(A_ )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = '''This is my Python script that emulates the Enigma machine from WWII.'''
__UpperCamelCase : Optional[Any] = (1, 1, 1)
__UpperCamelCase : List[Any] = '''pictures'''
__UpperCamelCase : int = (rotora, rotora, rotora)
__UpperCamelCase : List[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
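# Why running `enigma` twice restores the text: the reflector is an involution
# and the plugboard dictionary is symmetric, so with identical start settings
# every per-symbol substitution is its own inverse.
#
#   assert enigma(enigma("HELLO", (1, 1, 1)), (1, 1, 1)) == "HELLO"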
| 106 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 272 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
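# Why ceil(precision / 14): each term of the Chudnovsky series contributes
# roughly 14.18 new correct decimal digits, so about precision/14 iterations
# are enough for the number of digits requested.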
| 107 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 272 | 0 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ = "" , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase : str = is_leaf
lowerCAmelCase : str = prefix
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = 0
for q, w in zip(self.prefix , snake_case__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for word in words:
self.insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.prefix == word:
lowerCAmelCase : Union[str, Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase : Optional[Any] = RadixNode(prefix=snake_case__ , is_leaf=snake_case__ )
else:
lowerCAmelCase : Tuple = self.nodes[word[0]]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = incoming_node.match(
snake_case__ )
            # Case 3: The node prefix is equal to the matching string
            # Solution: We insert the remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(snake_case__ )
            # Case 4: The node prefix extends past the matching string
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
else:
lowerCAmelCase : Optional[Any] = remaining_prefix
lowerCAmelCase : int = self.nodes[matching_string[0]]
lowerCAmelCase : List[Any] = RadixNode(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[int] = aux_node
if remaining_word == "":
lowerCAmelCase : Optional[int] = True
else:
self.nodes[matching_string[0]].insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
        lowerCAmelCase : str = self.nodes.get(word[0] , None )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = incoming_node.match(
snake_case__ )
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
        lowerCAmelCase : int = self.nodes.get(word[0] , None )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = incoming_node.match(
snake_case__ )
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(snake_case__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase : List[str] = list(self.nodes.values() )[0]
lowerCAmelCase : List[str] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase : Optional[int] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase : Optional[Any] = list(incoming_node.nodes.values() )[0]
lowerCAmelCase : int = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase : Tuple = merging_node.nodes
return True
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = "banana bananas bandana band apple all beast".split()
lowerCAmelCase : List[str] = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def a__ ( ):
'''simple docstring'''
assert test_trie()
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = RadixNode()
lowerCAmelCase : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words )
    print("Words:" , words )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 108 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( _A: str ) -> str:
'''simple docstring'''
if not sentence:
return ""
    lowerCAmelCase = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A: str = logging.get_logger(__name__)
A: Tuple = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Tuple = 'trajectory_transformer'
__lowerCAmelCase : str = ['past_key_values']
__lowerCAmelCase : Optional[Any] = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=249 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=17 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0006 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=50256 , _SCREAMING_SNAKE_CASE=50256 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Tuple = action_weight
UpperCAmelCase : List[Any] = reward_weight
UpperCAmelCase : Any = value_weight
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : List[str] = block_size
UpperCAmelCase : List[Any] = action_dim
UpperCAmelCase : List[str] = observation_dim
UpperCAmelCase : str = transition_dim
UpperCAmelCase : Optional[Any] = learning_rate
UpperCAmelCase : Dict = n_layer
UpperCAmelCase : int = n_head
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : List[Any] = embd_pdrop
UpperCAmelCase : List[str] = attn_pdrop
UpperCAmelCase : Optional[Any] = resid_pdrop
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[int] = layer_norm_eps
UpperCAmelCase : List[str] = kaiming_initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 109 | '''simple docstring'''
import os
import string
import sys
__lowercase = 1 << 8
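# ARROW_KEY_FLAG (1 << 8) is added to arrow-key codes so they never collide with plain ASCII values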
__lowercase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
__lowercase = []
__lowercase = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__lowercase = ord(str(i))
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
lowerCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
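                    # note: KEYMAP[...] - 1 << 9 parses as (KEYMAP[...] - 1) << 9, since subtraction binds tighter than the shift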
                    if ord(chx ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase = cha[1]
else:
                lowerCAmelCase = ch.decode(encoding )
else:
lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase = sys.stdin.fileno()
lowerCAmelCase = termios.tcgetattr(_A )
try:
tty.setraw(_A )
lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def snake_case__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
    elif ord(char ) == KEYMAP["esc"]:
lowerCAmelCase = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
lowerCAmelCase = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 272 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[str] = HfApi()
_lowerCamelCase : Tuple = {}
# fmt: off
_lowerCamelCase : Optional[int] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : Optional[Any] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Any = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : List[Any] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_lowerCamelCase : Tuple = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCamelCase : Dict = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"Started running {mod.modelId}!!!")
if mod.modelId.startswith('CompVis'):
_lowerCamelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
_lowerCamelCase : Tuple = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCamelCase : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCamelCase : List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(f"{mod.modelId} has passed successfully!!!")
| 258 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
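        # maximum number of spectrogram frames in one full chunk: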
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0)
lowerCAmelCase = (log_spec + 4.0) / 4.0
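        # values are clamped to within 8.0 of the peak above, then mapped roughly into [-1, 1]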
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa)
lowerCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(__lowerCAmelCase)
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 272 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase_ = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
| 272 | 0 |
"""simple docstring"""
def __magic_name__ ( lowercase ):
    if length <= 0 or not isinstance(length , int ):
raise ValueError("""Length must be a positive integer.""" )
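    # the n-th hexagonal number is n * (2n - 1)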
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 173 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
| 272 | 0 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
__A = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class snake_case ( unittest.TestCase ):
@classmethod
def lowercase_ ( cls : List[Any])-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = TOKEN
HfFolder.save_token(__lowerCAmelCase)
@classmethod
def lowercase_ ( cls : str)-> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config")
except HTTPError:
pass
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
__lowerCAmelCase: str = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
config.push_to_hub("test-config" , use_auth_token=self._token)
__lowerCAmelCase: Optional[int] = BertConfig.from_pretrained(f"{USER}/test-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
# Reset repo
delete_repo(token=self._token , repo_id="test-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase , repo_id="test-config" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token)
__lowerCAmelCase: Dict = BertConfig.from_pretrained(f"{USER}/test-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token)
__lowerCAmelCase: Any = BertConfig.from_pretrained("valid_org/test-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id="valid_org/test-config-org" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token)
__lowerCAmelCase: Optional[Any] = BertConfig.from_pretrained("valid_org/test-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
def lowercase_ ( self : Any)-> List[str]:
'''simple docstring'''
CustomConfig.register_for_auto_class()
__lowerCAmelCase: str = CustomConfig(attribute=4_2)
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"})
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=__lowerCAmelCase)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig")
self.assertEqual(new_config.attribute , 4_2)
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : int)-> int:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Optional[int] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: str = c.summary_type + "foo" # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd , c.n_embd , "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop , c.resid_pdrop , "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type , c.summary_type , "mismatch for key: summary_type")
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = PretrainedConfig()
__lowerCAmelCase: Any = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__lowerCAmelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        __lowerCAmelCase: List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key)]
if len(__lowerCAmelCase) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f" {', '.join(__lowerCAmelCase)}.")
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
with self.assertRaises(__lowerCAmelCase):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
__lowerCAmelCase: Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert")
self.assertIsNotNone(__lowerCAmelCase)
def lowercase_ ( self : int)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: str = mock.Mock()
__lowerCAmelCase: Optional[Any] = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: str = HTTPError
__lowerCAmelCase: str = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: List[str] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__lowerCAmelCase) as mock_head:
__lowerCAmelCase: int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check ensures we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : List[str])-> Dict:
'''simple docstring'''
__lowerCAmelCase: Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
def lowercase_ ( self : Union[str, Any])-> int:
'''simple docstring'''
__lowerCAmelCase: str = AutoConfig.from_pretrained("bert-base-cased")
__lowerCAmelCase: Union[str, Any] = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__lowerCAmelCase)
__lowerCAmelCase: List[str] = 2
json.dump(configuration.to_dict() , open(os.path.join(__lowerCAmelCase , "config.4.0.0.json") , "w"))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Tuple = AutoConfig.from_pretrained(__lowerCAmelCase)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ["config.42.0.0.json"]
__lowerCAmelCase: int = 7_6_8
configuration.save_pretrained(__lowerCAmelCase)
shutil.move(os.path.join(__lowerCAmelCase , "config.4.0.0.json") , os.path.join(__lowerCAmelCase , "config.42.0.0.json"))
__lowerCAmelCase: str = AutoConfig.from_pretrained(__lowerCAmelCase)
self.assertEqual(new_configuration.hidden_size , 7_6_8)
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__lowerCAmelCase: Any = "v4.0.0"
__lowerCAmelCase , __lowerCAmelCase: List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase)
self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__lowerCAmelCase , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCAmelCase: Tuple = "v3.0.0"
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(__lowerCAmelCase)
self.assertEqual(old_configuration.hidden_size , 7_6_8)
| 217 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = '''sequence-classification'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
if type(__lowerCAmelCase) == dict:
lowerCAmelCase = Namespace(**__lowerCAmelCase)
lowerCAmelCase = glue_output_modes[hparams.task]
lowerCAmelCase = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return self.model(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowerCAmelCase = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase)
torch.save(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False):
"""simple docstring"""
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
lowerCAmelCase = torch.load(__lowerCAmelCase)
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(__lowerCAmelCase)
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)}
lowerCAmelCase = dict(results.items())
lowerCAmelCase = results
return ret, preds_list, out_label_list
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a_ ( __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def snake_case__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(_A , os.getcwd() )
lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() )
lowerCAmelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
lowerCAmelCase = GLUETransformer(_A )
lowerCAmelCase = generic_train(_A , _A )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) )
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_A )
if __name__ == "__main__":
main()
| 272 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :Optional[int] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Optional[int] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase__ ( unittest.TestCase ):
def __init__( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Any=3 ,lowerCamelCase__ : Tuple=18 ,lowerCamelCase__ : Dict=30 ,lowerCamelCase__ : Optional[int]=400 ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : int=[0.5, 0.5, 0.5] ,lowerCamelCase__ : List[Any]=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = size if size is not None else {'shortest_edge': 18}
_UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCamelCase : Dict = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : List[str] = num_channels
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : Union[str, Any] = min_resolution
_UpperCamelCase : int = max_resolution
_UpperCamelCase : Tuple = do_resize
_UpperCamelCase : List[str] = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : Dict = crop_size
_UpperCamelCase : List[str] = do_normalize
_UpperCamelCase : Optional[int] = image_mean
_UpperCamelCase : Optional[int] = image_std
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase__ ( lowerCAmelCase__ , unittest.TestCase ):
lowercase__ = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = LevitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,'image_mean' ) )
self.assertTrue(hasattr(__lowerCAmelCase ,'image_std' ) )
self.assertTrue(hasattr(__lowerCAmelCase ,'do_normalize' ) )
self.assertTrue(hasattr(__lowerCAmelCase ,'do_resize' ) )
self.assertTrue(hasattr(__lowerCAmelCase ,'do_center_crop' ) )
self.assertTrue(hasattr(__lowerCAmelCase ,'size' ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
_UpperCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,Image.Image )
# Test not batched input
_UpperCamelCase : Any = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_UpperCamelCase : Dict = image_processing(__lowerCAmelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,np.ndarray )
# Test not batched input
_UpperCamelCase : str = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_UpperCamelCase : List[Any] = image_processing(__lowerCAmelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,torch.Tensor )
# Test not batched input
_UpperCamelCase : int = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_UpperCamelCase : int = image_processing(__lowerCAmelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 83 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_ ( __lowerCAmelCase = 1_00_00_00 , __lowerCAmelCase = 10 )-> int:
'''simple docstring'''
    UpperCAmelCase : List[str] =defaultdict(int )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase : Dict =max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase : Optional[int] =1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
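        # the parity fix above keeps hole and outer widths congruent mod 2, so the lamina border has uniform integer thickness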
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f'{solution() = }')
| 348 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 272 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 267 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `lowerCAmelCase` is the expected-encoding dict defined just above
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3"
        )
| 272 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
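

if __name__ == "__main__":
    # Minimal sanity check (added sketch, not part of the original module):
    # `attribute_map` aliases num_attention_heads -> decoder_attention_heads
    # and hidden_size -> d_model on the instantiated config.
    config = Speech2Text2Config()
    assert config.num_attention_heads == config.decoder_attention_heads
    assert config.hidden_size == config.d_model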
| 313 | '''simple docstring'''
def gcd(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of a modulo m via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u3 % m
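

if __name__ == "__main__":
    # Worked example (added sketch): 7 * 15 = 105 = 4 * 26 + 1, so 15 is the
    # modular inverse of 7 modulo 26.
    assert find_mod_inverse(7, 26) == 15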
| 272 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 63 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return Transformer-style sinusoidal timestep embeddings."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
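

if __name__ == "__main__":
    # Smoke test (added sketch): four scalar timesteps map to a (4, 32) matrix
    # whose halves are sines and cosines at geometrically spaced frequencies.
    out = get_sinusoidal_embeddings(jnp.array([0.0, 1.0, 2.0, 3.0]), embedding_dim=32)
    assert out.shape == (4, 32)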
| 272 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs (XOR sign-bit trick)."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
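    # Added illustration (a minimal sketch): the XOR of two ints is negative
    # exactly when their sign bits differ.
    assert different_signs(1, -1) is True
    assert different_signs(72, 16) is False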
| 310 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 258 | '''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Project Euler 86: return the least cuboid size M for which the number of
    cuboids with integer shortest-path solutions first exceeds ``limit``.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
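    # Cross-check (added; numbers from the Project Euler 86 statement): M = 100
    # is the least size whose cumulative solution count first exceeds 2000.
    assert solution(2000) == 100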
| 272 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 211 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 272 | 0 |
"""simple docstring"""
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
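# Added note (sketch): as a package __init__, `__all__` above pins the public
# surface, so `from <this package> import *` exposes exactly these four names.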
| 173 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 272 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
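# Example (added sketch): find_backend('    if not is_torch_available():')
# returns "torch"; a line guarding several backends yields e.g. "tf_and_torch",
# since matches are sorted and joined with "_and_".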
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the `_import_structure` half and in the
    `TYPE_CHECKING` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the _import_structure objects and the TYPE_CHECKING objects of an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if the two halves do not define the same objects."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 217 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
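# Added note (a sketch of the math): `jax_cosine_distance` L2-normalizes the
# rows of both matrices, so for inputs shaped (n, d) and (m, d) it returns the
# (n, m) matrix of cosine similarities consumed by the safety module below.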
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 272 | 0 |
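A minimal hedged sketch of the scoring logic the safety-checker module above implements: L2-normalize embeddings row-wise, compare them with a cosine-similarity matmul, and flag an image when any per-concept score clears its threshold. Every name and shape below is illustrative, not the module's actual API.

import jax.numpy as jnp

def cosine_similarity(emb_1, emb_2, eps=1e-12):
    # normalize each row to unit L2 norm, then compare all pairs at once
    norm_1 = emb_1 / jnp.clip(jnp.linalg.norm(emb_1, axis=1, keepdims=True), a_min=eps)
    norm_2 = emb_2 / jnp.clip(jnp.linalg.norm(emb_2, axis=1, keepdims=True), a_min=eps)
    return norm_1 @ norm_2.T

image_embeds = jnp.ones((2, 8))        # toy (batch, dim) image embeddings
concept_embeds = jnp.ones((17, 8))     # toy concept bank, as in the 17 concepts above
thresholds = jnp.full((17,), 0.9)      # illustrative per-concept thresholds
scores = cosine_similarity(image_embeds, concept_embeds) - thresholds[None, :]
has_concept = jnp.any(scores > 0, axis=1)  # one boolean per image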
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ :str = logging.get_logger(__name__)
lowerCAmelCase__ :Dict = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowerCAmelCase__ :Union[str, Any] = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowerCAmelCase__ :List[str] = {
'''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8,
}
def lowerCAmelCase__ ( a__: Optional[Any] , a__: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_A , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = collections.OrderedDict()
_UpperCAmelCase = collections.OrderedDict()
_UpperCAmelCase = collections.OrderedDict()
with open(_A , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(_A ):
_UpperCAmelCase = b
_UpperCAmelCase = idx
for wd in b:
_UpperCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __a ( lowerCAmelCase__ ):
_a : List[str] = VOCAB_FILES_NAMES
_a : int = PRETRAINED_VOCAB_FILES_MAP
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : str = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|startoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , do_clean_text=__lowerCAmelCase , **__lowerCAmelCase , )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(
f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(
f'''Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
_UpperCAmelCase = do_clean_text
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_vocab_and_emoji(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return self.subword_tokenizer.tokenize(__lowerCAmelCase , clean=self.do_clean_text )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.vocab.get(__lowerCAmelCase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(__lowerCAmelCase )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = ''.join(__lowerCAmelCase ).strip()
return out_string
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) + [self.eos_token_id] )
if len(__lowerCAmelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 0
if os.path.isdir(__lowerCAmelCase ):
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
_UpperCAmelCase = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
_UpperCAmelCase = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
_UpperCAmelCase = token_index
writer.write(','.join(__lowerCAmelCase ) + '\n' )
index += 1
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , __lowerCAmelCase )
return vocab_file, emoji_file
class __a ( lowerCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = vocab # same as swe
_UpperCAmelCase = ids_to_tokens # same as bpe
_UpperCAmelCase = emoji
_UpperCAmelCase = np.max([len(__lowerCAmelCase ) for w in self.vocab.keys()] )
_UpperCAmelCase = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
_UpperCAmelCase = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
_UpperCAmelCase = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
_UpperCAmelCase = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_UpperCAmelCase = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_UpperCAmelCase = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
_UpperCAmelCase = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
_UpperCAmelCase = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
_UpperCAmelCase = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.ids_to_tokens )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.content_repatter1.sub('<URL>' , __lowerCAmelCase )
_UpperCAmelCase = self.content_repatter2.sub('<EMAIL>' , __lowerCAmelCase )
_UpperCAmelCase = self.content_repatter3.sub('<TEL>' , __lowerCAmelCase )
_UpperCAmelCase = self.content_repatter4.sub('<DATE>' , __lowerCAmelCase )
_UpperCAmelCase = self.content_repatter5.sub('<DATE>' , __lowerCAmelCase )
_UpperCAmelCase = self.content_repatter6.sub('<PRICE>' , __lowerCAmelCase )
_UpperCAmelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_UpperCAmelCase = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = text.replace(' ' , '<SP>' )
_UpperCAmelCase = text.replace(' ' , '<SP>' )
_UpperCAmelCase = text.replace('\r\n' , '<BR>' )
_UpperCAmelCase = text.replace('\n' , '<BR>' )
_UpperCAmelCase = text.replace('\r' , '<BR>' )
_UpperCAmelCase = text.replace('\t' , '<TAB>' )
_UpperCAmelCase = text.replace('—' , 'ー' )
_UpperCAmelCase = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
_UpperCAmelCase = text.replace(__lowerCAmelCase , __lowerCAmelCase )
if clean:
_UpperCAmelCase = self.clean_text(__lowerCAmelCase )
def check_simbol(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = x.encode()
if len(__lowerCAmelCase ) == 1 and len(__lowerCAmelCase ) == 2:
_UpperCAmelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC_2A1 and c <= 0XC_2BF)
or (c >= 0XC_780 and c <= 0XC_783)
or (c >= 0XC_AB9 and c <= 0XC_BBF)
or (c >= 0XC_C80 and c <= 0XC_DA2)
):
return True
return False
def checkuae(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = x.encode()
if len(__lowerCAmelCase ) == 1 and len(__lowerCAmelCase ) == 3:
_UpperCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28_080 and c <= 0XE2B_07F:
return True
return False
_UpperCAmelCase = 0
_UpperCAmelCase = []
while pos < len(__lowerCAmelCase ):
_UpperCAmelCase = min(len(__lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
_UpperCAmelCase = [] # (token_id, token, pos)
for e in range(__lowerCAmelCase , __lowerCAmelCase , -1 ):
_UpperCAmelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__lowerCAmelCase ) > 2:
_UpperCAmelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__lowerCAmelCase ) > 0:
# the smallest token_id is adopted
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = sorted(__lowerCAmelCase , key=lambda _SCREAMING_SNAKE_CASE : x[0] )[0]
result.append(__lowerCAmelCase )
_UpperCAmelCase = e
else:
_UpperCAmelCase = pos + 1
_UpperCAmelCase = text[pos:end]
if check_simbol(__lowerCAmelCase ):
result.append('<KIGOU>' )
elif checkuae(__lowerCAmelCase ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
_UpperCAmelCase = end
return result
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="\n" ) -> str:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__lowerCAmelCase ) > 0:
words.append(bytearray(__lowerCAmelCase ).decode('utf-8' , errors='replace' ) )
_UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(__lowerCAmelCase )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
words.append(bytearray(__lowerCAmelCase ).decode('utf-8' , errors='replace' ) )
_UpperCAmelCase = ''.join(__lowerCAmelCase )
return text
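# Hedged usage sketch (assumption: the public wrapper class is GPTNeoXJapaneseTokenizer,
# matching the checkpoint referenced in the vocab map above):
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    ids = tokenizer("こんにちは、世界")["input_ids"]
    print(tokenizer.decode(ids))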
| 329 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = MvpTokenizer
UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 0 |
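A hedged usage sketch of the tokenizer pair the tests above exercise; the checkpoint name comes from the tests themselves, the sample strings are illustrative.

from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
batch = tokenizer(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)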
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
snake_case_ : Optional[Any] = logging.getLogger(__name__)
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , ):
_UpperCamelCase : Optional[Any] = bnb_quantization_config.load_in_abit
_UpperCamelCase : Union[str, Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
_UpperCamelCase : str = []
# custom device map
if isinstance(_A , _A ) and len(device_map.keys() ) > 1:
_UpperCamelCase : Union[str, Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_UpperCamelCase : Union[str, Any] = get_keys_to_not_convert(_A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_A )
_UpperCamelCase : Dict = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_UpperCamelCase : Dict = []
_UpperCamelCase : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_A )
# compatibility with peft
_UpperCamelCase : Tuple = load_in_abit
_UpperCamelCase : Optional[Any] = load_in_abit
_UpperCamelCase : int = get_parameter_device(_A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_UpperCamelCase : str = replace_with_bnb_layers(_A , _A , modules_to_not_convert=_A )
# convert param to the right dtype
_UpperCamelCase : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_UpperCamelCase : List[str] = name.replace('.weight' , '' ).replace('.bias' , '' )
_UpperCamelCase : str = getattr(_A , _A , _A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_A ):
param.to(_A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization. '
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
_UpperCamelCase : List[str] = replace_with_bnb_layers(
_A , _A , modules_to_not_convert=_A )
_UpperCamelCase : Any = get_quantized_model_device_map(
_A , _A , _A , max_memory=_A , no_split_module_classes=_A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_UpperCamelCase : str = True
_UpperCamelCase : Optional[Any] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
_A , _A , _A , dtype=bnb_quantization_config.torch_dtype , offload_folder=_A , offload_state_dict=_A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_A , device_map=_A , offload_dir=_A )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if device_map is None:
if torch.cuda.is_available():
_UpperCamelCase : Optional[Any] = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(_A , _A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_UpperCamelCase : Any = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[Any] = special_dtypes
_UpperCamelCase : str = no_split_module_classes
_UpperCamelCase : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_UpperCamelCase : Any = get_balanced_memory(
_A , low_zero=(device_map == 'balanced_low_0') , max_memory=_A , **_A , )
_UpperCamelCase : Union[str, Any] = max_memory
_UpperCamelCase : Optional[int] = infer_auto_device_map(_A , **_A )
if isinstance(_A , _A ):
# check if don't have any quantized module on the cpu
_UpperCamelCase : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_UpperCamelCase : Tuple = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if modules_to_not_convert is None:
_UpperCamelCase : str = []
_UpperCamelCase , _UpperCamelCase : List[Any] = _replace_with_bnb_layers(
_A , _A , _A , _A )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
_UpperCamelCase : List[str] = False
for name, module in model.named_children():
if current_key_name is None:
_UpperCamelCase : int = []
current_key_name.append(_A )
if isinstance(_A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_UpperCamelCase : Any = '.'.join(_A )
_UpperCamelCase : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_UpperCamelCase : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace `nn.Linear` module
if bnb_quantization_config.load_in_abit:
_UpperCamelCase : Optional[int] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_UpperCamelCase : List[str] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
_UpperCamelCase : Optional[int] = module.weight.data
if module.bias is not None:
_UpperCamelCase : int = module.bias.data
bnb_module.requires_grad_(_A )
setattr(_A , _A , _A )
_UpperCamelCase : Tuple = True
if len(list(module.children() ) ) > 0:
_UpperCamelCase , _UpperCamelCase : Optional[int] = _replace_with_bnb_layers(
_A , _A , _A , _A )
_UpperCamelCase : List[str] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A__ ( UpperCAmelCase_ ):
with init_empty_weights():
_UpperCamelCase : List[str] = deepcopy(_A ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
_UpperCamelCase : int = find_tied_parameters(_A )
# For compatibility with Accelerate < 0.18
if isinstance(_A , _A ):
_UpperCamelCase : Any = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_UpperCamelCase : str = sum(_A , [] )
_UpperCamelCase : Union[str, Any] = len(_A ) > 0
# Check if it is a base model
_UpperCamelCase : int = False
if hasattr(_A , 'base_model_prefix' ):
_UpperCamelCase : Union[str, Any] = not hasattr(_A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_UpperCamelCase : Tuple = list(model.named_children() )
_UpperCamelCase : List[str] = [list_modules[-1][0]]
# add last module together with tied weights
_UpperCamelCase : int = set(_A ) - set(_A )
_UpperCamelCase : int = list(set(_A ) ) + list(_A )
# remove ".weight" from the keys
_UpperCamelCase : Any = ['.weight', '.bias']
_UpperCamelCase : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_UpperCamelCase : Union[str, Any] = name.replace(_A , '' )
filtered_module_names.append(_A )
return filtered_module_names
def A__ ( UpperCAmelCase_ ):
for m in model.modules():
if isinstance(_A , bnb.nn.Linearabit ):
return True
return False
def A__ ( UpperCAmelCase_ ):
return next(parameter.parameters() ).device
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if fpaa_statistics is None:
set_module_tensor_to_device(_A , _A , 0 , dtype=_A , value=_A )
_UpperCamelCase : Optional[Any] = param_name
_UpperCamelCase : List[Any] = model
if "." in tensor_name:
_UpperCamelCase : Union[str, Any] = tensor_name.split('.' )
for split in splits[:-1]:
_UpperCamelCase : Tuple = getattr(_A , _A )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
_UpperCamelCase : str = new_module
_UpperCamelCase : Optional[int] = splits[-1]
# offload weights
_UpperCamelCase : str = False
offload_weight(module._parameters[tensor_name] , _A , _A , index=_A )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _A , index=_A , )
else:
offload_weight(_A , _A , _A , index=_A )
offload_weight(_A , param_name.replace('weight' , 'SCB' ) , _A , index=_A )
set_module_tensor_to_device(_A , _A , 'meta' , dtype=_A , value=torch.empty(*param.size() ) )
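# Hedged sketch of the core swap performed by `_replace_with_bnb_layers` above:
# an nn.Linear is replaced by a bitsandbytes 8-bit layer (threshold value illustrative).
if __name__ == "__main__":
    import torch.nn as nn
    import bitsandbytes as bnb

    fp_linear = nn.Linear(768, 768)
    int8_linear = bnb.nn.Linear8bitLt(
        fp_linear.in_features,
        fp_linear.out_features,
        fp_linear.bias is not None,
        has_fp16_weights=False,  # quantize the weights instead of keeping fp16 copies
        threshold=6.0,           # LLM.int8() outlier threshold
    )
    int8_linear.weight.data = fp_linear.weight.data
    if fp_linear.bias is not None:
        int8_linear.bias.data = fp_linear.bias.data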
| 83 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__( enum.Enum ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Any = 2
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
| 272 | 0 |
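A hedged end-to-end sketch of the pipeline class above; the checkpoint and generation arguments are illustrative.

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of",
    max_new_tokens=20,
    return_full_text=False,  # takes the ReturnType.NEW_TEXT branch in postprocess
)
print(outputs[0]["generated_text"])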
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Pix2StructConfig,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
Pix2StructForConditionalGeneration,
Pix2StructPreTrainedModel,
Pix2StructTextModel,
Pix2StructVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348 | '''simple docstring'''
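A hedged, self-contained sketch of what the lazy-init pattern above buys: submodules are imported only when one of their attributes is first accessed, so importing the package stays cheap. All names below are placeholders, not the library's internals.

import importlib
import types

class LazyModule(types.ModuleType):
    """Minimal stand-in for the _LazyModule used above."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(
            "." + self._attr_to_module[attr], self.__name__
        )
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value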
def snake_case__ ( _A: str ) -> list[int]:
'''simple docstring'''
lowerCAmelCase = [0 for i in range(len(_A ) )]
# initialize interval's left pointer and right pointer
lowerCAmelCase , lowerCAmelCase = 0, 0
for i in range(1 , len(_A ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowerCAmelCase = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowerCAmelCase = min_edge
while go_next(_A , _A , _A ):
z_result[i] += 1
# if the new index's result gives us a larger right interval,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
lowerCAmelCase , lowerCAmelCase = i, i + z_result[i] - 1
return z_result
def snake_case__ ( _A: int , _A: list[int] , _A: str ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(_A ) and s[z_result[i]] == s[i + z_result[i]]
def snake_case__ ( _A: str , _A: str ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
lowerCAmelCase = z_function(pattern + input_str )
for val in z_result:
# if the value is greater than the length of the pattern string,
# that means this index is the starting position of a substring
# which is equal to the pattern string
if val >= len(_A ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 0 |
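A hedged worked example for the helpers above, assuming they keep their upstream names `z_function` and `find_pattern`: the Z-value at index i is the length of the longest common prefix of the string and its suffix starting at i.

if __name__ == "__main__":
    # z[0] is conventionally left at 0 by this implementation
    assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    # "abr" occurs twice in "abracadabra" (indices 0 and 7)
    assert find_pattern("abr", "abracadabra") == 2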
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( lowerCAmelCase__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
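# Hedged, self-contained sketch of how a concrete command plugs into an abstract
# base like the one above (upstream name BaseTransformersCLICommand assumed;
# the "hello" command is purely illustrative):
if __name__ == "__main__":

    class BaseCommand(ABC):  # stand-in mirroring the abstract class above
        @staticmethod
        @abstractmethod
        def register_subcommand(parser: ArgumentParser):
            raise NotImplementedError()

        @abstractmethod
        def run(self):
            raise NotImplementedError()

    class HelloCommand(BaseCommand):
        @staticmethod
        def register_subcommand(parser: ArgumentParser):
            sub = parser.add_parser("hello")
            sub.set_defaults(func=lambda args: HelloCommand())

        def run(self):
            print("hello from the CLI")

    parser = ArgumentParser("cli")
    subcommands = parser.add_subparsers()
    HelloCommand.register_subcommand(subcommands)
    args = parser.parse_args(["hello"])
    args.func(args).run()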
| 267 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : str = '''EncodecFeatureExtractor'''
UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
| 272 | 0 |
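A hedged usage sketch for a processor like the one above, which pairs a T5 tokenizer with an Encodec feature extractor; the MusicGen checkpoint name is an assumption.

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
inputs = processor(
    text=["80s pop track with punchy synths"],
    padding=True,
    return_tensors="pt",
)
print(inputs["input_ids"].shape, inputs["attention_mask"].shape)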
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = os.path.join(args.tf_model_dir , '''parameters.json''' )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(open(_A ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
SCREAMING_SNAKE_CASE : int = args.output + '''.pt'''
SCREAMING_SNAKE_CASE : str = OrderedDict()
with tf.device('''/CPU:0''' ):
SCREAMING_SNAKE_CASE : List[str] = tf.train.load_checkpoint(args.tf_model_dir )
SCREAMING_SNAKE_CASE : List[str] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
SCREAMING_SNAKE_CASE : Dict = reader.get_tensor(_A ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
SCREAMING_SNAKE_CASE : List[Any] = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
SCREAMING_SNAKE_CASE : str = 8
SCREAMING_SNAKE_CASE : List[str] = '''model.sqout.%d.weight''' % (player * 2) # fed into an nn.Sequential with Tanh, so 2 at a time
SCREAMING_SNAKE_CASE : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_A )
elif key_name.startswith('''model/moe''' ):
SCREAMING_SNAKE_CASE : List[str] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
SCREAMING_SNAKE_CASE : Tuple = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
SCREAMING_SNAKE_CASE : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(_A )
elif key_name.endswith('''/softmlp/kernel''' ):
SCREAMING_SNAKE_CASE : Tuple = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
SCREAMING_SNAKE_CASE : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(_A )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
SCREAMING_SNAKE_CASE : Optional[int] = key_name[-9:-7]
for i in range(16 ):
SCREAMING_SNAKE_CASE : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
SCREAMING_SNAKE_CASE : Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
SCREAMING_SNAKE_CASE : str = torch.tensor(_A )
elif key_name.startswith('''model/mlp''' ):
SCREAMING_SNAKE_CASE : List[str] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
SCREAMING_SNAKE_CASE : Any = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
SCREAMING_SNAKE_CASE : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(_A )
elif key_name.endswith('''/p1/bias''' ):
SCREAMING_SNAKE_CASE : List[str] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
elif key_name.endswith('''/p2/kernel''' ):
SCREAMING_SNAKE_CASE : str = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
SCREAMING_SNAKE_CASE : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(_A )
elif key_name.endswith('''/p2/bias''' ):
SCREAMING_SNAKE_CASE : str = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
SCREAMING_SNAKE_CASE : Any = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
elif key_name.startswith('''model/ln''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.feed_forward.norm.bias''' % player
SCREAMING_SNAKE_CASE : Tuple = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Any = torch.tensor(_A )
elif key_name.endswith('''/g''' ):
SCREAMING_SNAKE_CASE : Dict = '''model.blocks.%d.feed_forward.norm.weight''' % player
SCREAMING_SNAKE_CASE : List[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : str = torch.tensor(_A )
elif key_name.startswith('''model/att''' ):
SCREAMING_SNAKE_CASE : Optional[int] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
SCREAMING_SNAKE_CASE : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
SCREAMING_SNAKE_CASE : List[str] = state[:, 0, :, :]
SCREAMING_SNAKE_CASE : Optional[Any] = state[:, 1, :, :]
SCREAMING_SNAKE_CASE : int = state[:, 2, :, :]
SCREAMING_SNAKE_CASE : Optional[int] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
SCREAMING_SNAKE_CASE : str = torch.tensor(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
SCREAMING_SNAKE_CASE : Optional[int] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
SCREAMING_SNAKE_CASE : Dict = torch.tensor(_A )
elif key_name.endswith('''/o/kernel''' ):
SCREAMING_SNAKE_CASE : str = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
SCREAMING_SNAKE_CASE : int = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Dict = torch.tensor(_A )
elif key_name.startswith('''model/an''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
SCREAMING_SNAKE_CASE : List[Any] = '''model.blocks.%d.self_attn.norm.bias''' % player
SCREAMING_SNAKE_CASE : Tuple = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(_A )
elif key_name.endswith('''/g''' ):
SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.self_attn.norm.weight''' % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
SCREAMING_SNAKE_CASE : List[Any] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
SCREAMING_SNAKE_CASE : Dict = '''model.%s.weight''' % nlayer
SCREAMING_SNAKE_CASE : int = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A )
if key_name.startswith('''model/wte''' ):
SCREAMING_SNAKE_CASE : int = '''lm_head.weight'''
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_A )
elif key_name.startswith('''model/wob''' ):
SCREAMING_SNAKE_CASE : Tuple = '''final_logits_bias'''
SCREAMING_SNAKE_CASE : Any = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Any = state.reshape((1, -1) )
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
elif key_name == "model/dense/kernel":
SCREAMING_SNAKE_CASE : Tuple = '''model.last_project.weight'''
SCREAMING_SNAKE_CASE : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(_A )
elif key_name == "model/dense_1/bias":
SCREAMING_SNAKE_CASE : Optional[int] = '''model.last_project.bias'''
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_A )
torch.save(_A , args.output )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
a__ : str = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 313 | '''simple docstring'''
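A hedged sketch of the transpose convention the converter above applies over and over: TensorFlow dense kernels are stored as (in_features, out_features), while torch.nn.Linear weights are (out_features, in_features), hence the recurring `.transpose([1, 0]).copy()`.

import numpy as np
import torch

tf_kernel = np.random.rand(1024, 4096).astype(np.float32)      # TF layout: (in, out)
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())   # PT layout: (out, in)
assert pt_weight.shape == (4096, 1024)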
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNet2DModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
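# Minimal standalone sketch of the same sampling loop outside unittest (an added
# example, left commented; it assumes the "google/ddpm-cifar10-32" Hub checkpoint
# used by the slow test above is reachable):
#   import torch
#   from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#   image = pndm(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images[0]
#   image.shape  # (32, 32, 3)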
| 272 | 0 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Benchmark both implementations over a range of input values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
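# Worked example of the recurrence implemented above: every interior entry is the
# sum of the two entries above it, C(n, k) = C(n-1, k-1) + C(n-1, k), so
#   generate_pascal_triangle(5) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
# e.g. the 6 in the last row is 3 + 3 from the row above.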
| 63 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
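# Behavior examples (these follow directly from the mapping above):
#   capitalize("hello world") -> "Hello world"
#   capitalize("123 hello")   -> "123 hello"  (a non-letter first character is kept)
#   capitalize("")            -> ""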
| 272 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = 1
__UpperCamelCase = 3
__UpperCamelCase = (32, 32)
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNet2DConditionModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=32,)
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,)
return model
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = RobertaSeriesConfig(
hidden_size=32,project_dim=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=5006,)
return RobertaSeriesModelWithTransformation(__lowerCAmelCase )
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
def extract(*A_: Any,**A_: int ):
class __lowerCamelCase :
def __init__( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = torch.ones([0] )
def snake_case_ ( self: Tuple,A_: str ):
'''simple docstring'''
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__UpperCamelCase = 77
__UpperCamelCase = self.dummy_image.to(__lowerCAmelCase )
__UpperCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__UpperCamelCase = AltDiffusionImg2ImgPipeline(
unet=__lowerCAmelCase,scheduler=__lowerCAmelCase,vae=__lowerCAmelCase,text_encoder=__lowerCAmelCase,tokenizer=__lowerCAmelCase,safety_checker=__lowerCAmelCase,feature_extractor=self.dummy_extractor,)
__UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor,do_normalize=__lowerCAmelCase )
__UpperCamelCase = alt_pipe.to(__lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__UpperCamelCase = 'A painting of a squirrel eating a burger'
__UpperCamelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
__UpperCamelCase = alt_pipe(
[prompt],generator=__lowerCAmelCase,guidance_scale=6.0,num_inference_steps=2,output_type='np',image=__lowerCAmelCase,)
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
__UpperCamelCase = alt_pipe(
[prompt],generator=__lowerCAmelCase,guidance_scale=6.0,num_inference_steps=2,output_type='np',image=__lowerCAmelCase,return_dict=__lowerCAmelCase,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda','This test requires a GPU' )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__UpperCamelCase = 77
__UpperCamelCase = self.dummy_image.to(__lowerCAmelCase )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = AltDiffusionImg2ImgPipeline(
unet=__lowerCAmelCase,scheduler=__lowerCAmelCase,vae=__lowerCAmelCase,text_encoder=__lowerCAmelCase,tokenizer=__lowerCAmelCase,safety_checker=__lowerCAmelCase,feature_extractor=self.dummy_extractor,)
__UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor,do_normalize=__lowerCAmelCase )
__UpperCamelCase = alt_pipe.to(__lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__UpperCamelCase = 'A painting of a squirrel eating a burger'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = alt_pipe(
[prompt],generator=__lowerCAmelCase,num_inference_steps=2,output_type='np',image=__lowerCAmelCase,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda','This test requires a GPU' )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCamelCase = init_image.resize((760, 504) )
__UpperCamelCase = 'BAAI/AltDiffusion'
__UpperCamelCase = AltDiffusionImg2ImgPipeline.from_pretrained(
__lowerCAmelCase,safety_checker=__lowerCAmelCase,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase = 'A fantasy landscape, trending on artstation'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = pipe(
prompt=__lowerCAmelCase,image=__lowerCAmelCase,strength=0.7_5,guidance_scale=7.5,generator=__lowerCAmelCase,output_type='np',)
__UpperCamelCase = output.images[0]
__UpperCamelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__UpperCamelCase = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__UpperCamelCase = init_image.resize((768, 512) )
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
__UpperCamelCase = 'BAAI/AltDiffusion'
__UpperCamelCase = AltDiffusionImg2ImgPipeline.from_pretrained(
__lowerCAmelCase,safety_checker=__lowerCAmelCase,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase = 'A fantasy landscape, trending on artstation'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = pipe(
prompt=__lowerCAmelCase,image=__lowerCAmelCase,strength=0.7_5,guidance_scale=7.5,generator=__lowerCAmelCase,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 310 | '''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the keyboard, translating special key sequences."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
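# A minimal consumption sketch (an added illustration, not part of this module;
# left commented because it blocks on stdin):
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break
#       print(repr(key))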
| 272 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def A (self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def A (self : Dict ):
A = """<pad>"""
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def A (self : Optional[int] ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(vocab_keys ) , 1002 )
def A (self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def A (self : str ):
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
A = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def A (self : Dict ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(__lowerCAmelCase )
A = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(__lowerCAmelCase )
A = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(__lowerCAmelCase )
A = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(__lowerCAmelCase )
A = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
@cached_property
def A (self : Optional[Any] ):
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def A (self : Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name )
A = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase )
A = pickle.dumps(__lowerCAmelCase )
pickle.loads(__lowerCAmelCase )
def A (self : List[Any] ):
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = """I was born in 92000, and this is falsé."""
A = tokenizer.tokenize(__lowerCAmelCase )
A = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
A = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
A = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(__lowerCAmelCase )
A = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A (self : int ):
A = """Hello World!"""
A = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) )
@slow
def A (self : Dict ):
A = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
A = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) )
@slow
def A (self : List[str] ):
A = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
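    # Note on the `+ tokenizer.fairseq_offset` arithmetic used above: XLM-R reserves
    # fairseq ids 0-3 for <s>, <pad>, </s> and <unk>, so every raw SentencePiece id
    # is shifted by the offset (1 for this tokenizer) before comparison.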
| 258 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ['''input_features''']
    def __init__(self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def _np_extract_fbank_features(self , waveform):
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values , attention_mask , padding_value = 0.0):
"""simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32)
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__(self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
        is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray):
            raw_speech = np.asarray(raw_speech , dtype=np.float32)
        elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"""input_features""": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["""input_features"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
            padded_inputs["""input_features"""] = np.stack(padded_inputs["""input_features"""] , axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0] , list):
            padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["""input_features"""] = input_features
        if return_attention_mask:
            # rescale from sample (480000) to feature (3000)
            padded_inputs["""attention_mask"""] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
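# Minimal usage sketch of the extractor defined above (an added example, left
# commented so the module has no import-time side effects; assumes 16 kHz mono
# float audio):
#   extractor = WhisperFeatureExtractor(feature_size=80, sampling_rate=16000)
#   audio = np.zeros(16000, dtype=np.float32)  # one second of silence
#   features = extractor(audio, sampling_rate=16000, return_tensors="np")
#   features["input_features"].shape  # (1, 80, 3000): 80 mel bins over 30 s of frames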
| 272 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
'''simple docstring'''
def __init__(self , A , A=13 , A=30 , A=2 , A=3 , A=True , A=True , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.02 , A=None , ) -> Union[str, Any]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a = (image_size // patch_size) ** 2
_a = num_patches + 1
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def a__ (self ) -> Tuple:
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ (self , A , A , A ) -> Tuple:
"""simple docstring"""
_a = ViTMSNModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_a = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A ) -> str:
"""simple docstring"""
_a = self.type_sequence_label_size
_a = ViTMSNForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_a = model(__lowerCAmelCase , labels=__lowerCAmelCase )
print(f'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print(f'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = ViTMSNForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ViTMSNModelTester(self )
_a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def a__ (self ) -> List[str]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__lowerCAmelCase )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def a__ (self ) -> Optional[int]:
"""simple docstring"""
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = ViTMSNModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def a__ (self ) -> Any:
"""simple docstring"""
torch.manual_seed(2 )
_a = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__lowerCAmelCase )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_a = model(**__lowerCAmelCase )
# verify the logits
_a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_a = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 211 | '''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = '''T5Config'''


class TFMT5Model(TFT5Model):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MT5Config
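# Usage mirrors the plain T5 classes since these subclasses only swap in MT5Config,
# e.g. (an added sketch assuming the "google/mt5-small" TF weights are reachable):
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")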
| 272 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = '''biogpt'''

    def __init__(self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
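# Quick sanity check of the defaults above (an added sketch, left commented):
#   config = BioGptConfig()
#   (config.vocab_size, config.num_hidden_layers, config.hidden_size)  # (42384, 24, 1024)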
| 173 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''ernie_m'''
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self , vocab_size = 250002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1E-0_5 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 272 | 0 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = 3
__A = 5
__A = [1, 2, 5]
print(combination_sum_iv(n, array, target))
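    # Worked check for the demo values: ordered sequences from {1, 2, 5} summing to
    # 5 are 1+1+1+1+1, 1+1+1+2 (4 orderings), 1+2+2 (3 orderings), and 5 itself,
    # so all three implementations return 9 and the print above shows 9.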
| 217 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    '''simple docstring'''
    mode = '''sequence-classification'''
    def __init__(self , hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode)
    def forward(self , **inputs):
        """simple docstring"""
        return self.model(**inputs)
    def training_step(self , batch , batch_idx):
        """simple docstring"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["""scheduler"""]
        tensorboard_logs = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """simple docstring"""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file)
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == """dev"""
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("""Saving features into cached file %s""" , cached_features_file)
                torch.save(features , cached_features_file)
    def get_dataloader(self , mode , batch_size , shuffle = False):
        """simple docstring"""
        mode = """dev""" if mode == """test""" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("""Loading features from cached file %s""" , cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step(self , batch , batch_idx):
        """simple docstring"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs = self(**inputs)
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self , outputs):
        """simple docstring"""
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids)}
        ret = dict(results.items())
        ret["""log"""] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self , outputs):
        """simple docstring"""
        ret , preds , targets = self._eval_end(outputs)
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self , outputs):
        """simple docstring"""
        ret , predictions , targets = self._eval_end(outputs)
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser , root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser , root_dir)
        parser.add_argument(
            """--max_seq_length""" , default=128 , type=int , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--task""" , default="""""" , type=str , required=True , help="""The GLUE task to run""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=int , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
        return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
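# Example invocation sketch (the --task/--gpus/--max_seq_length flags are defined
# above; the remaining flag names come from add_generic_args/BaseTransformer and
# are assumptions here, as are the paths):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --do_predict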
| 272 | 0 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
lowerCAmelCase__ :Union[str, Any] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
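    # Tiny numeric check (an added sketch, independent of the PIL demo above): with
    # size=2 and stride=2 the pools cover non-overlapping 2x2 blocks, so for
    #   example = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
    # maxpooling(example, size=2, stride=2) -> [[ 6.  8.] [14. 16.]]
    # avgpooling(example, size=2, stride=2) -> [[ 3.  5.] [11. 13.]]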
| 329 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    '''simple docstring'''
    def __init__(self , *args , **kwargs):
        """simple docstring"""
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs)
| 272 | 0 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Any = numpy.radians(_A )
_UpperCamelCase , _UpperCamelCase : Optional[int] = numpy.cos(_A ), numpy.sin(_A )
_UpperCamelCase : Dict = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_A , _A )
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : str = zip(*_A )
plt.plot(_A , _A )
plt.show()
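# A small, illustrative doctest (not in the original): one iteration step turns
# a single segment into four, so a two-point input yields five points, the
# second of which lies one third of the way along the segment.
#
# >>> step = iteration_step([numpy.array([0, 0]), numpy.array([1, 0])])
# >>> len(step)
# 5
# >>> numpy.allclose(step[1], [1 / 3, 0])
# True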
if __name__ == "__main__":
import doctest
doctest.testmod()
processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 83 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30,
        n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        '''Compute the log-mel spectrogram of the provided audio.'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        '''Every array in the list is normalized to have zero mean and unit variance.'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None,
        return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None,
        do_normalize=None, **kwargs,
    ):
        '''Featurize and prepare one or several audio sequence(s) for the model.'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        '''Serializes this instance to a dictionary, dropping the large mel filter bank.'''
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
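# A hypothetical usage sketch (not part of the original file): the extractor
# pads or truncates audio to 30 s, so one second of silence at 16 kHz still
# yields a (1, 80, 3000) log-mel feature array.
#
# feature_extractor = WhisperFeatureExtractor()
# features = feature_extractor(np.zeros(16000), sampling_rate=16000, return_tensors="np")
# print(features["input_features"].shape)  # (1, 80, 3000)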
| 348 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''This function prepares a random PIL image input.'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 0 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        '''The information moves in only one direction, forward, from the input
        nodes through the two hidden layers to the output node.'''
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        '''Propagates the error from the output layer back through the network and
        updates every weight matrix by gradient descent.'''
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    '''Example showing how to use the neural network class above.'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
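# A small, illustrative check (not in the original): sigmoid_derivative expects
# sigmoid *outputs*, so s * (1 - s) should match a finite-difference estimate
# of d/dx sigmoid(x).
#
# >>> x = 0.5
# >>> s = sigmoid(numpy.array(x))
# >>> eps = 1e-6
# >>> numeric = (sigmoid(numpy.array(x + eps)) - sigmoid(numpy.array(x - eps))) / (2 * eps)
# >>> bool(abs(sigmoid_derivative(s) - numeric) < 1e-6)
# True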
if __name__ == "__main__":
example()
| 267 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 272 | 0 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''Returns every combination of words from word_bank that concatenates to target.'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 313 | '''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    '''Returns the greatest common divisor of a and b via Euclid's algorithm.'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''Returns the modular multiplicative inverse of a modulo m, if it exists.'''
    if greatest_common_divisor(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
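# A quick sanity check (not in the original): 7 * 15 = 105 = 4 * 26 + 1, so 15
# is the inverse of 7 modulo 26.
#
# >>> find_mod_inverse(7, 26)
# 15
# >>> (7 * find_mod_inverse(7, 26)) % 26
# 1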
| 272 | 0 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    '''Return the least max cuboid size whose cumulative count of cuboids with an
    integer shortest surface path exceeds the given limit.'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
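# Context for the counting step above (added explanation, not in the original):
# unfolding an a x b x c cuboid flattens the shortest surface path into the
# hypotenuse sqrt(a**2 + (b + c)**2). For a fixed longest side
# a = max_cuboid_size and a fixed b + c = sum_shortest_sides, every admissible
# split into sides b >= c gives the same path length, and the
# min(...) - max(...) + 1 expression counts exactly those splits.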
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''Returns the positional encoding (same as Tensor2Tensor).'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
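# A minimal usage sketch (an assumption, not part of the original file):
# embedding four timesteps into eight dimensions yields a (4, 8) array whose
# halves pair sines and cosines at geometrically spaced frequencies.
#
# emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
# print(emb.shape)  # (4, 8)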
class FlaxTimestepEmbedding(nn.Module):
    r'''Time step embedding module: learns embeddings for input time steps.'''
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r'''Wrapper module for sinusoidal time step embeddings.'''
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 272 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
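# A brief usage sketch (an assumption, not part of the original file): the
# defaults above reproduce the facebook/blenderbot_small-90M architecture.
#
# config = BlenderbotSmallConfig()
# print(config.d_model, config.encoder_layers, config.encoder_attention_heads)  # 512 8 16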
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__UpperCamelCase = {0: 'batch'}
__UpperCamelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'}
__UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__UpperCamelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__UpperCamelCase, __UpperCamelCase = self.num_layers
for i in range(__lowerCAmelCase ):
__UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'}
__UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__UpperCamelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase = super().outputs
else:
__UpperCamelCase = super(__lowerCAmelCase,self ).outputs
if self.use_past:
__UpperCamelCase, __UpperCamelCase = self.num_layers
for i in range(__lowerCAmelCase ):
__UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'}
__UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def snake_case_ ( self: Union[str, Any],A_: List[str],A_: Tuple = -1,A_: int = -1,A_: Optional[Any] = False,A_: Optional[Any] = None,):
'''simple docstring'''
__UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase )
# Generate decoder inputs
__UpperCamelCase = seq_length if not self.use_past else 1
__UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase )
__UpperCamelCase = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__UpperCamelCase = dict(**__lowerCAmelCase,**__lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase, __UpperCamelCase = common_inputs['input_ids'].shape
__UpperCamelCase = common_inputs['decoder_input_ids'].shape[1]
__UpperCamelCase, __UpperCamelCase = self.num_attention_heads
__UpperCamelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase = decoder_seq_length + 3
__UpperCamelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCamelCase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__lowerCAmelCase,__lowerCAmelCase )],dim=1 )
__UpperCamelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCamelCase, __UpperCamelCase = self.num_layers
__UpperCamelCase = min(__lowerCAmelCase,__lowerCAmelCase )
__UpperCamelCase = max(__lowerCAmelCase,__lowerCAmelCase ) - min_num_layers
__UpperCamelCase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
) )
# TODO: test this.
__UpperCamelCase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__lowerCAmelCase,__lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) )
return common_inputs
def snake_case_ ( self: int,A_: Optional[int],A_: Union[str, Any] = -1,A_: List[Any] = -1,A_: str = False,A_: List[Any] = None,):
'''simple docstring'''
__UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase, __UpperCamelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__UpperCamelCase = seqlen + 2
__UpperCamelCase, __UpperCamelCase = self.num_layers
__UpperCamelCase, __UpperCamelCase = self.num_attention_heads
__UpperCamelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase = common_inputs['attention_mask'].dtype
__UpperCamelCase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__lowerCAmelCase,__lowerCAmelCase,dtype=__lowerCAmelCase )],dim=1 )
__UpperCamelCase = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(__lowerCAmelCase )
]
return common_inputs
def snake_case_ ( self: List[str],A_: Optional[int],A_: Union[str, Any] = -1,A_: Any = -1,A_: int = False,A_: Optional[Any] = None,):
'''simple docstring'''
__UpperCamelCase = compute_effective_axis_dimension(
__lowerCAmelCase,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCamelCase = tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
__UpperCamelCase = compute_effective_axis_dimension(
__lowerCAmelCase,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__UpperCamelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__UpperCamelCase = dict(tokenizer(__lowerCAmelCase,return_tensors=__lowerCAmelCase ) )
return common_inputs
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Optional[int] = -1,A_: Union[str, Any] = -1,A_: Any = False,A_: List[Any] = None,):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCAmelCase,batch_size=__lowerCAmelCase,seq_length=__lowerCAmelCase,is_pair=__lowerCAmelCase,framework=__lowerCAmelCase )
elif self.task == "causal-lm":
__UpperCamelCase = self._generate_dummy_inputs_for_causal_lm(
__lowerCAmelCase,batch_size=__lowerCAmelCase,seq_length=__lowerCAmelCase,is_pair=__lowerCAmelCase,framework=__lowerCAmelCase )
else:
__UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase,batch_size=__lowerCAmelCase,seq_length=__lowerCAmelCase,is_pair=__lowerCAmelCase,framework=__lowerCAmelCase )
return common_inputs
def snake_case_ ( self: List[Any],A_: List[str],A_: Dict,A_: List[str],A_: Optional[Any] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase = super()._flatten_past_key_values_(__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase )
else:
__UpperCamelCase = super(__lowerCAmelCase,self )._flatten_past_key_values_(
__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase )
| 310 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    '''Given any two of voltage, current, and power (with the unknown passed as 0),
    compute the remaining quantity.'''
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | '''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    '''Return the least max cuboid size whose cumulative count of cuboids with an
    integer shortest surface path exceeds the given limit.'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
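# A small illustration (not in the original) of the rewrite above: word-internal
# "@@" markers are stripped, word-final pieces gain "</w>", and the special
# tokens are restored unchanged (which moves them to the end of the dict):
#
# >>> rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
# {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}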
def lowerCAmelCase (__A , __A):
"""simple docstring"""
assert os.path.exists(_A)
os.makedirs(_A , exist_ok=_A)
print(F'''Writing results to {pytorch_dump_folder_path}''')
# handle various types of models
_a = basename(_A)
_a = dirname(_A)
_a = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_a = cls.hub_models()
_a = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
_a = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''')
_a = hub_utils.from_pretrained(
_A , _A , _A , archive_map=_A , **_A)
_a = vars(chkpt['''args''']['''model'''])
_a = args['''source_lang''']
_a = args['''target_lang''']
_a = dirname(_A)
_a = basename(_A)
# dicts
_a = os.path.join(_A , F'''dict.{src_lang}.txt''')
_a = os.path.join(_A , F'''dict.{tgt_lang}.txt''')
_a = Dictionary.load(_A)
_a = rewrite_dict_keys(src_dict.indices)
_a = len(_A)
_a = os.path.join(_A , '''vocab-src.json''')
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''')
with open(_A , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_a = True
for k in src_vocab.keys():
if not k.islower():
_a = False
break
_a = Dictionary.load(_A)
_a = rewrite_dict_keys(tgt_dict.indices)
_a = len(_A)
_a = os.path.join(_A , '''vocab-tgt.json''')
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''')
with open(_A , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A))
# merges_file (bpecodes)
_a = os.path.join(_A , VOCAB_FILES_NAMES['''merges_file'''])
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_a = os.path.join(_A , _A)
if os.path.exists(_A):
break
with open(_A , encoding='''utf-8''') as fin:
_a = fin.read()
_a = re.sub(r''' \d+$''' , '''''' , _A , 0 , re.M) # remove frequency number
print(F'''Generating {merges_file}''')
with open(_A , '''w''' , encoding='''utf-8''') as fout:
fout.write(_A)
# model config
_a = os.path.join(_A , '''config.json''')
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}'''
_a = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
_a = 5
_a = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_a = best_score_hparams[model_dir]['''length_penalty''']
else:
_a = 1.0
print(F'''Generating {fsmt_model_config_file}''')
with open(_A , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A))
# tokenizer config
_a = os.path.join(_A , _A)
_a = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1_024,
'''do_lower_case''': do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''')
with open(_A , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A))
# model
_a = chkpt['''models'''][0]
_a = model.state_dict()
# rename keys to start with 'model.'
_a = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items())
# remove unneeded keys
_a = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(_A , _A)
_a = FSMTConfig.from_pretrained(_A)
_a = FSMTForConditionalGeneration(_A)
# check that it loads ok
model_new.load_state_dict(_A , strict=_A)
# save
_a = os.path.join(_A , _A)
print(F'''Generating {pytorch_weights_dump_path}''')
torch.save(_A , _A)
print('''Conversion is done!''')
print('''\nLast step is to upload the files to s3''')
print(F'''cd {data_root}''')
print(F'''transformers-cli upload {model_dir}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
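# Example invocation for reference (the script name and paths are hypothetical;
# the checkpoint dir must also hold the fairseq dict.*.txt files and bpecodes):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en-hf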
| 211 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
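# For reference, the `_LazyModule` pattern above keeps `import transformers`
# cheap: the heavy torch/tf/flax submodules are imported only the first time a
# name registered in `_import_structure` is actually accessed, e.g.
#   from transformers import ResNetModel  # modeling_resnet is imported only here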
| 272 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the attribute path ("a.b.c") to reach the target parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
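# Quick illustration of the three matching rules above (weight names invented,
# not taken from a real checkpoint):
#   should_ignore("text_encoder_prenet.embed", ["text_encoder_prenet.*"])            # True, trailing wildcard
#   should_ignore("encoder.layers.3.norm_k.bias", ["encoder.layers.*.norm_k.bias"])  # True, inner wildcard
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                           # True, plain substring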
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
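# Example invocation for reference (the script name, checkpoint and output
# paths below are hypothetical):
#
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task t2s --checkpoint_path speecht5_tts.pt \
#       --vocab_path spm_char.model --pytorch_dump_folder_path speecht5_tts_hf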
| 173 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 272 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__A = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Pairwise cosine similarity between two batches of embeddings."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
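# Sanity check of the helper above (values chosen by hand): rows of the
# identity matrix are orthonormal, so the pairwise similarity matrix is the
# identity matrix as well (up to the eps clipping).
#   e = jnp.eye(3)
#   jax_cosine_distance(e, e)  # -> 3x3 identity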
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config,
        input_shape=None,
        seed=0,
        dtype=jnp.float32,
        _do_init=True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params=None):
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params=None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 272 | 0 |
import math


def decimal_to_octal(num: int) -> str:
    """Convert a base-10 integer into its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # place each remainder at the next decimal digit position
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any

    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print some example conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
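# Worked example for decimal_to_octal(65):
#   65 % 8 = 1, 65 // 8 = 8   -> place digit 1 at 10^0
#    8 % 8 = 0,  8 // 8 = 1   -> place digit 0 at 10^1
#    1 % 8 = 1,  1 // 8 = 0   -> place digit 1 at 10^2
# giving the digits 101, i.e. "0o101" (and indeed 1*64 + 0*8 + 1 = 65).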
| 329 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 272 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
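# Worked example of encode_example above (values invented for illustration):
#   tvl = TranslationVariableLanguages(languages=["de", "en"])
#   tvl.encode_example({"de": "Hallo", "en": ["Hello", "Hi"]})
#   -> {"language": ("de", "en", "en"), "translation": ("Hallo", "Hello", "Hi")}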
| 83 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give more state to XLNet/Transformer-XL on short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
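# Typical end-user call for reference (the model id is illustrative; any
# causal LM checkpoint works):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)
#   # -> [{"generated_text": "Hello, I'm a language model, ..."}]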
| 272 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece-style Unigram tokenizer built on HF `tokenizers`."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # the Unigram model needs to know which vocab id stands for <unk>
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
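# Minimal usage sketch (the corpus is invented): train from an in-memory
# iterator, then tokenize.
#   tok = SentencePieceUnigramTokenizer()
#   tok.train_from_iterator(["some text", "more text"], vocab_size=100, show_progress=False)
#   tok.encode("some text").ids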
| 348 | '''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """For each index i > 0, compute the length of the longest substring that
    starts at i and is also a prefix of `input_str`."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match starting at i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index marks the starting position of a substring
        # that is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
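# Worked example (computed by hand from the definitions above):
#   z_function("abracadabra") -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
#   find_pattern("abr", "abracadabra") -> 2   (matches at indices 0 and 7)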
| 272 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration class to store the configuration of an MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 267 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
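A hedged usage sketch for the processor above (the checkpoint name is illustrative and loading it needs Hub access; this class is the processor registered for MusicGen-style checkpoints in transformers):

import numpy as np
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")  # illustrative checkpoint
audio = np.zeros(32_000, dtype=np.float32)  # one second of silence at 32 kHz
inputs = processor(text="80s pop with synths", audio=audio, sampling_rate=32_000, return_tensors="pt")
print(list(inputs.keys()))  # expect input_ids, attention_mask, input_values, padding_mask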
| 272 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000,
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 313 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
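Outside the test harness the same pieces compose into a one-off sampler; a minimal sketch using the checkpoint from the slow test above (weights are downloaded from the Hub):

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
image = pipe(num_inference_steps=20).images[0]  # a PIL image by default
image.save("pndm_sample.png")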
| 272 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None,
                 lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
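A hedged usage sketch for the tokenizer above (requires the `sentencepiece` package and Hub access; the checkpoint is the one from the URL maps at the top of the module):

from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tok("hello world").input_ids  # target-side text, e.g. an ASR transcript
print(tok.decode(ids, skip_special_tokens=True))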
| 63 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence if it is a lowercase letter."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
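A few quick checks of the helper above:

assert capitalize("hello world") == "Hello world"
assert capitalize("123 apples") == "123 apples"  # non-letters pass through unchanged
assert capitalize("") == ""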
| 272 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
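The script stops at predictions in the scaled [0, 1] space. A possible follow-up, not part of the original, that refits the same MinMax scaler to map predictions back to the source scale:

    scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    pred_rescaled = scaler.inverse_transform(pred.reshape(-1, 1))
    print(pred_rescaled[:forward_days])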
| 310 | '''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 272 | 0 |
'''simple docstring'''
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(map(str, a)))


if __name__ == "__main__":
    main()
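Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it only pays off when values span a small range; a quick check of the function above:

nums = [5, 1, 4, 1, 2]
pigeonhole_sort(nums)
assert nums == [1, 1, 2, 4, 5]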
| 258 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a Whisper feature extractor that turns raw audio into log-mel spectrograms."""

    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400,
                 padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value: float = 0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None,
                 return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None,
                 do_normalize=None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples,
            truncation=truncation, pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
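A short usage sketch for the feature extractor above (random noise stands in for real audio; the shapes assume the defaults of 80 mel bins and 30-second chunks):

fe = WhisperFeatureExtractor()
speech = np.random.randn(16_000 * 2).astype(np.float32)  # 2 s of fake 16 kHz audio
features = fe(speech, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000): padded to a full 30 s window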
| 272 | 0 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Project Euler 26: find the denominator up to `digit` whose unit fraction
    has the longest recurring cycle in its decimal expansion."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
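For a small bound the result is checkable by hand: among unit fractions with denominator up to 10, 1/7 has the longest recurring cycle, 0.(142857) of length 6:

assert solution(1, 10) == 7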
| 211 | '''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
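A hedged usage sketch (downloads weights from the Hub and needs TensorFlow installed; `google/mt5-small` is one published mT5 checkpoint):

from transformers import AutoTokenizer, TFMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
# mT5 is pretrained only on span corruption, so fine-tune before expecting meaningful generation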
| 272 | 0 |
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Project Euler 10: return the sum of all primes below n, using a sieve."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 173 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12,
                 num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu",
                 hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1,
                 max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1,
                 layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0,
                 **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
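The attribute_map above aliases legacy attribute names onto canonical ones; a minimal illustration:

cfg = ErnieMConfig()
cfg.num_classes = 3    # routed to `num_labels` by attribute_map
print(cfg.num_labels)  # 3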
| 272 | 0 |
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the Gamma function for integer and half-integer arguments via recursion."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 217 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length,
                    label_list=self.labels, output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."
        # We test on the dev set to compare to benchmarks without submitting to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size, shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
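A hypothetical invocation of the script above (the script filename and some flags live outside this excerpt, so treat them as assumptions; --task, --gpus, --max_seq_length, and --overwrite_cache are defined right above):

# python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#     --data_dir ./glue_data/MRPC --output_dir ./results/mrpc \
#     --do_train --do_predict --max_seq_length 128 --gpus 1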
| 272 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
_UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = pipeline(
__lowerCAmelCase , generator=__lowerCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase , 15 )
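# For reference, a minimal sketch of the `assert_mean_pixel_difference` helper
# used above (an approximation of the shared diffusers test utility, not its
# exact implementation; the threshold is assumed to be a mean absolute error in
# 0-255 pixel units):
def _mean_pixel_difference_sketch(image, expected_image, expected_max_diff=10):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"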
| 329 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" , FutureWarning , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
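# The replacement is a drop-in rename; the image processor exposes the same
# preprocessing API as the deprecated feature extractor. A minimal migration
# sketch (the checkpoint name is an assumption, not taken from this file):
def _migration_sketch(image):
    from transformers import DeformableDetrImageProcessor

    image_processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
    return image_processor(images=image, return_tensors="pt")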
| 272 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
snake_case_ : Any = logging.get_logger(__name__)
# General docstring
snake_case_ : str = 'RegNetConfig'
# Base docstring
snake_case_ : Dict = 'facebook/regnet-y-040'
snake_case_ : Tuple = [1, 1088, 7, 7]
# Image classification docstring
snake_case_ : Tuple = 'facebook/regnet-y-040'
snake_case_ : List[str] = 'tabby, tabby cat'
snake_case_ : Tuple = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__ ( nn.Module ):
def __init__( self : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str = 3 ,lowerCamelCase__ : List[str] = 1 ,lowerCamelCase__ : str = 1 ,lowerCamelCase__ : Any = "relu" ,):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Any = nn.Convad(
__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=__lowerCAmelCase ,stride=__lowerCAmelCase ,padding=kernel_size // 2 ,groups=__lowerCAmelCase ,bias=__lowerCAmelCase ,)
_UpperCamelCase : int = nn.BatchNormad(__lowerCAmelCase )
_UpperCamelCase : Dict = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Any = self.convolution(__lowerCAmelCase )
_UpperCamelCase : List[Any] = self.normalization(__lowerCAmelCase )
_UpperCamelCase : Optional[int] = self.activation(__lowerCAmelCase )
return hidden_state
class lowercase__ ( nn.Module ):
def __init__( self : str ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Optional[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
_UpperCamelCase : Optional[Any] = config.num_channels
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_UpperCamelCase : Union[str, Any] = self.embedder(__lowerCAmelCase )
return hidden_state
class lowercase__ ( nn.Module ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any = 2 ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Tuple = nn.Convad(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ,stride=__lowerCAmelCase ,bias=__lowerCAmelCase )
_UpperCamelCase : Optional[int] = nn.BatchNormad(__lowerCAmelCase )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.convolution(__lowerCAmelCase )
_UpperCamelCase : Optional[Any] = self.normalization(__lowerCAmelCase )
return hidden_state
class lowercase__ ( nn.Module ):
def __init__( self : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : int = nn.AdaptiveAvgPoolad((1, 1) )
_UpperCamelCase : List[str] = nn.Sequential(
nn.Convad(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.pooler(__lowerCAmelCase )
_UpperCamelCase : str = self.attention(__lowerCAmelCase )
_UpperCamelCase : Dict = hidden_state * attention
return hidden_state
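# The squeeze-and-excitation block above can be exercised in isolation; a small
# illustrative sketch (channel counts are arbitrary, not RegNet's):
def _se_sketch():
    hidden_state = torch.randn(1, 8, 7, 7)
    pooled = nn.AdaptiveAvgPool2d((1, 1))(hidden_state)  # squeeze: (1, 8, 1, 1)
    attention = nn.Sequential(
        nn.Conv2d(8, 2, kernel_size=1), nn.ReLU(), nn.Conv2d(2, 8, kernel_size=1), nn.Sigmoid()
    )(pooled)  # excite: per-channel gates in (0, 1)
    return hidden_state * attention  # broadcasts over H and W, reweighting channels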
class lowercase__ ( nn.Module ):
def __init__( self : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Dict = in_channels != out_channels or stride != 1
_UpperCamelCase : Optional[int] = max(1 ,out_channels // config.groups_width )
_UpperCamelCase : Optional[Any] = (
RegNetShortCut(__lowerCAmelCase ,__lowerCAmelCase ,stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase : Optional[int] = nn.Sequential(
RegNetConvLayer(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(__lowerCAmelCase ,__lowerCAmelCase ,stride=__lowerCAmelCase ,groups=__lowerCAmelCase ,activation=config.hidden_act ) ,RegNetConvLayer(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ,activation=__lowerCAmelCase ) ,)
_UpperCamelCase : str = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Any ):
'''simple docstring'''
_UpperCamelCase : int = hidden_state
_UpperCamelCase : Union[str, Any] = self.layer(__lowerCAmelCase )
_UpperCamelCase : List[str] = self.shortcut(__lowerCAmelCase )
hidden_state += residual
_UpperCamelCase : Dict = self.activation(__lowerCAmelCase )
return hidden_state
class lowercase__ ( nn.Module ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] = 1 ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Optional[int] = in_channels != out_channels or stride != 1
_UpperCamelCase : List[Any] = max(1 ,out_channels // config.groups_width )
_UpperCamelCase : Optional[Any] = (
RegNetShortCut(__lowerCAmelCase ,__lowerCAmelCase ,stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase : List[str] = nn.Sequential(
RegNetConvLayer(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(__lowerCAmelCase ,__lowerCAmelCase ,stride=__lowerCAmelCase ,groups=__lowerCAmelCase ,activation=config.hidden_act ) ,RegNetSELayer(__lowerCAmelCase ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(__lowerCAmelCase ,__lowerCAmelCase ,kernel_size=1 ,activation=__lowerCAmelCase ) ,)
_UpperCamelCase : List[str] = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : int = hidden_state
_UpperCamelCase : Optional[Any] = self.layer(__lowerCAmelCase )
_UpperCamelCase : Any = self.shortcut(__lowerCAmelCase )
hidden_state += residual
_UpperCamelCase : Optional[Any] = self.activation(__lowerCAmelCase )
return hidden_state
class lowercase__ ( nn.Module ):
def __init__( self : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] = 2 ,lowerCamelCase__ : str = 2 ,):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Optional[int] = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_UpperCamelCase : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,stride=__lowerCAmelCase ,) ,*[layer(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for _ in range(depth - 1 )] ,)
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.layers(__lowerCAmelCase )
return hidden_state
class lowercase__ ( nn.Module ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Any = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__lowerCAmelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
_UpperCamelCase : Optional[int] = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__lowerCAmelCase ,config.depths[1:] ):
self.stages.append(RegNetStage(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,depth=__lowerCAmelCase ) )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any = False ,lowerCamelCase__ : Any = True ):
'''simple docstring'''
_UpperCamelCase : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase : List[str] = hidden_states + (hidden_state,)
_UpperCamelCase : Optional[Any] = stage_module(__lowerCAmelCase )
if output_hidden_states:
_UpperCamelCase : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCAmelCase ,hidden_states=__lowerCAmelCase )
class lowercase__ ( lowerCAmelCase__ ):
lowercase__ = RegNetConfig
lowercase__ = '''regnet'''
lowercase__ = '''pixel_values'''
lowercase__ = True
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
if isinstance(__lowerCAmelCase ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' )
elif isinstance(__lowerCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str=False ):
'''simple docstring'''
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_UpperCamelCase : Optional[Any] = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowercase__ ( lowerCAmelCase__ ):
def __init__( self : str ,lowerCamelCase__ : Any ):
'''simple docstring'''
super().__init__(__lowerCAmelCase )
_UpperCamelCase : Optional[Any] = config
_UpperCamelCase : Optional[Any] = RegNetEmbeddings(__lowerCAmelCase )
_UpperCamelCase : int = RegNetEncoder(__lowerCAmelCase )
_UpperCamelCase : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCAmelCase ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Any = None ,lowerCamelCase__ : Optional[int] = None ):
'''simple docstring'''
_UpperCamelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase : str = self.embedder(__lowerCAmelCase )
_UpperCamelCase : List[str] = self.encoder(
__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase ,return_dict=__lowerCAmelCase )
_UpperCamelCase : List[Any] = encoder_outputs[0]
_UpperCamelCase : Union[str, Any] = self.pooler(__lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase ,pooler_output=__lowerCAmelCase ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowercase__ ( lowerCAmelCase__ ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
super().__init__(__lowerCAmelCase )
_UpperCamelCase : Tuple = config.num_labels
_UpperCamelCase : Optional[int] = RegNetModel(__lowerCAmelCase )
# classification head
_UpperCamelCase : List[str] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCAmelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : List[str] = None ,lowerCamelCase__ : List[Any] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : Optional[int] = None ,):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase : Optional[Any] = self.regnet(__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase ,return_dict=__lowerCAmelCase )
_UpperCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase : int = self.classifier(__lowerCAmelCase )
_UpperCamelCase : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase : Dict = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase : List[Any] = 'single_label_classification'
else:
_UpperCamelCase : Union[str, Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
_UpperCamelCase : Tuple = MSELoss()
if self.num_labels == 1:
_UpperCamelCase : Tuple = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_UpperCamelCase : Optional[Any] = loss_fct(__lowerCAmelCase ,__lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase : int = CrossEntropyLoss()
_UpperCamelCase : Dict = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase : int = BCEWithLogitsLoss()
_UpperCamelCase : Optional[Any] = loss_fct(__lowerCAmelCase ,__lowerCAmelCase )
if not return_dict:
_UpperCamelCase : Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase ,logits=__lowerCAmelCase ,hidden_states=outputs.hidden_states )
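# End-to-end usage sketch for the classification model above (requires network
# access to download the checkpoint named in the docstrings; added for
# illustration, not part of the original module):
def _regnet_inference_sketch(image):
    from transformers import AutoImageProcessor, RegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]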
| 83 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
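# `_LazyModule` defers the heavy torch imports above until a name is first
# accessed. A minimal sketch of the underlying idea (illustrative only, not the
# transformers implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps submodule name -> list of public names it provides
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # import the submodule only now, on first attribute access
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")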
| 272 | 0 |
class Graph:
    def __init__(self) -> None:
        # map each vertex to its list of outgoing edges
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current node as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
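# An equivalent iterative traversal with an explicit stack avoids Python's
# recursion limit on deep graphs (a sketch added for comparison, not part of
# the original snippet):
def dfs_iterative(graph: Graph, start_vertex: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if not visited[vertex]:
            visited[vertex] = True
            print(vertex, end=" ")
            # push neighbors in reverse so they pop in adjacency-list order
            stack.extend(reversed(graph.vertex.get(vertex, [])))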
| 348 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 267 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
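# The integration checks above mirror ordinary usage; a small sketch (requires
# downloading the `xlm-roberta-base` checkpoint from the Hub):
def _encode_sketch():
    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    return tokenizer.encode("Hello World!")  # -> [0, 35378, 6661, 38, 2], as asserted above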
| 272 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Any = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE : Any = getattr(_A , _A )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Dict = getattr(_A , _A ).shape
else:
SCREAMING_SNAKE_CASE : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : str = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : Tuple = value
else:
SCREAMING_SNAKE_CASE : Optional[Any] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Dict = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Any = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : str = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
SCREAMING_SNAKE_CASE : Dict = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : List[Any] = name.split(_A )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : Optional[Any] = mapped_key.replace('''*''' , _A )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[str] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : List[Any] = '''weight_v'''
elif "weight" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = '''weight'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : Tuple = '''bias'''
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = name.split('''.''' )
SCREAMING_SNAKE_CASE : str = int(items[0] )
SCREAMING_SNAKE_CASE : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_A )
@torch.no_grad()
def UpperCAmelCase_( a__ , a__ , a__=None , a__=None , a__=True ):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : Optional[int] = HubertConfig.from_pretrained(_A )
else:
SCREAMING_SNAKE_CASE : int = HubertConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE : int = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE : str = target_dict.pad_index
SCREAMING_SNAKE_CASE : str = target_dict.bos_index
SCREAMING_SNAKE_CASE : int = target_dict.eos_index
SCREAMING_SNAKE_CASE : Dict = len(target_dict.symbols )
SCREAMING_SNAKE_CASE : str = os.path.join(_A , '''vocab.json''' )
if not os.path.isdir(_A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , _A )
SCREAMING_SNAKE_CASE : List[Any] = WavaVecaCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_A , )
SCREAMING_SNAKE_CASE : Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
SCREAMING_SNAKE_CASE : str = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
SCREAMING_SNAKE_CASE : List[Any] = HubertForCTC(_A )
else:
SCREAMING_SNAKE_CASE : Dict = HubertModel(_A )
if is_finetuned:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE : int = model[0].eval()
recursively_load_weights(_A , _A , _A )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ : Tuple = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
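# Example invocation of this conversion script (file paths are hypothetical
# placeholders; the script name is assumed from its location in transformers):
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned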
| 313 | '''simple docstring'''
def gcd(a: int, b: int) -> int:
    '''Euclid's algorithm for the greatest common divisor.'''
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    '''Return x such that (a * x) % m == 1, computed with the extended Euclidean algorithm.'''
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
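# Quick sanity check: 3 * 4 = 12 ≡ 1 (mod 11), so the inverse of 3 modulo 11 is 4.
if __name__ == "__main__":
    assert mod_inverse(3, 11) == 4
    assert (7 * mod_inverse(7, 26)) % 26 == 1
    print("mod_inverse sanity checks passed")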
| 272 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ : Any = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 63 | '''simple docstring'''
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''Returns sinusoidal (Transformer-style) positional embeddings for the given timesteps.'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
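# Usage sketch for `get_sinusoidal_embeddings` (shapes are the point; values
# left unchecked):
def _sinusoidal_sketch():
    timesteps = jnp.array([0, 10, 500])
    emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
    return emb.shape  # (3, 32): 16 sine features followed by 16 cosine features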
| 272 | 0 |
def combination_util(arr, n, r, index, data, i):
    '''Recursively build every r-combination of arr[0..n-1] in data[0..r-1].'''
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
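# For comparison, the standard library yields the same r-combinations directly:
if __name__ == "__main__":
    import itertools

    for combination in itertools.combinations([10, 20, 30, 40, 50], 3):
        print(*combination)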
| 310 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=13 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Optional[Any]=32 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Optional[Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : List[Any]=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : str=2 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=None , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = scope
A = self.vocab_size - 1
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
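    # NOTE: the create_and_check_* helpers below rely on this tuple order when they
    # unpack (config, input_ids, head_mask, token_type_ids, *labels).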
    def create_and_check_openai_gpt_model(self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model(self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification(self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
    def setUp(self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_lm_generate_openai_gpt(self ):
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device ) # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 258 | '''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000 ) -> int:
    '''simple docstring'''
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
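# Sanity note (not re-verified here): with the default limit of one million cuboids,
# this should reproduce the published Project Euler 86 answer, 1818.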
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | 0 |
'''simple docstring'''
def is_unique_chars(input_str):
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2 , ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
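# Illustrative check: is_unique_chars("abcd") -> True, is_unique_chars("abca") -> False.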
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
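    # _LazyModule defers the heavy torch/tf/flax imports declared above until an
    # attribute is first accessed, keeping the initial package import cheap.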
| 272 | 0 |
"""simple docstring"""
from math import pi
def arc_length( angle , radius ):
    return 2 * pi * radius * (angle / 360)
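# Sanity check: arc_length(90, 10) is a quarter of the circumference 2 * pi * 10,
# i.e. about 15.7079.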
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 173 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict( self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , """image_mean"""))
        self.assertTrue(hasattr(image_processing , """image_std"""))
        self.assertTrue(hasattr(image_processing , """do_normalize"""))
        self.assertTrue(hasattr(image_processing , """do_resize"""))
        self.assertTrue(hasattr(image_processing , """size"""))
    def test_image_processor_from_dict_with_kwargs( self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
    def test_call_pil( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_numpy( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
| 272 | 0 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self ):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
| 217 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1 , emb_2 , eps=1e-12 ):
    '''simple docstring'''
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
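# Despite the name, this returns the matrix of cosine *similarities*: entry (i, j)
# compares row i of emb_1 with row j of emb_2.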
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
    '''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup( self):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype)
        self.concept_embeds = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
        self.special_care_embeds_weights = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
    def __call__( self , clip_input):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3)
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config , input_shape = None , seed = 0 , dtype = jnp.float32 , _do_init = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs)
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init)
    def init_weights( self , rng , input_shape , params = None):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        random_params = self.module.init(rngs , clip_input)["""params"""]
        return random_params
    def __call__( self , clip_input , params = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1))
        return self.module.apply(
            {"""params""": params or self.params} , jnp.array(clip_input , dtype=jnp.float32) , rngs={} , )
| 272 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim = 768 , ):
        """simple docstring"""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 329 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self):
        """simple docstring"""
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer( self):
        """simple docstring"""
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
    @cached_property
    def default_tokenizer_fast( self):
        """simple docstring"""
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
    @require_torch
    def test_prepare_batch( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens) , padding=True , return_tensors="""pt""")
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result)
    # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors="""pt""")
            # check if input_ids are returned and no labels
            self.assertIn("""input_ids""" , batch)
            self.assertIn("""attention_mask""" , batch)
            self.assertNotIn("""labels""" , batch)
            self.assertNotIn("""decoder_attention_mask""" , batch)
@require_torch
    def test_tokenizer_as_target_length( self):
        """simple docstring"""
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
            self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen( self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=True , truncation=True , return_tensors="""pt""")
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
    def test_special_tokens( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , text_target=tgt_text , return_tensors="""pt""")
            input_ids = inputs["""input_ids"""]
            labels = inputs["""labels"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs( self):
"""simple docstring"""
pass
    def test_embeded_special_tokens( self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations( unittest.TestCase ):
    def test_gelu_versions( self ):
        '''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self ):
        '''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        gelu10 = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
    def test_get_activation( self ):
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self ):
        '''simple docstring'''
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 83 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline( Pipeline ):
    '''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs):
        """simple docstring"""
        super().__init__(*args , **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , stop_sequence=None , prefix=None , handle_long_generation=None , **generate_kwargs , ):
        """simple docstring"""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["""input_ids"""].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    """ [None, 'hole']""")
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
            if return_tensors is not None:
                raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"""add_space_before_punct_symbol""": True})
        return super()._parse_and_tokenize(*args , **kwargs)
    def __call__( self , text_inputs , **kwargs):
        """simple docstring"""
        return super().__call__(text_inputs , **kwargs)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs):
        """simple docstring"""
        input_ids = model_inputs["""input_ids"""]
        attention_mask = model_inputs.get("""attention_mask""" , None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("""prompt_text""")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("""prefix_length""" , 0)
        if prefix_length > 0:
            has_max_new_tokens = """max_new_tokens""" in generate_kwargs or (
                """generation_config""" in generate_kwargs
                and generate_kwargs["""generation_config"""].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("""max_length""") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = """min_new_tokens""" in generate_kwargs or (
                """generation_config""" in generate_kwargs
                and generate_kwargs["""generation_config"""].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True):
        """simple docstring"""
        generated_sequence = model_outputs["""generated_sequence"""][0]
        input_ids = model_outputs["""input_ids"""]
        prompt_text = model_outputs["""prompt_text"""]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"""generated_token_ids""": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"""generated_text""": all_text}
            records.append(record)
        return records
| 272 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot_small-90M''': 5_12}
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
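# Example: get_pairs(("h", "e", "l", "l", "o")) -> {("h","e"), ("e","l"), ("l","l"), ("l","o")}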
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , snake_case__ , snake_case__ , snake_case__="__start__" , snake_case__="__end__" , snake_case__="__unk__" , snake_case__="__null__" , **snake_case__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , **__lowerCAmelCase )
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase : Optional[int] =json.load(__lowerCAmelCase )
UpperCAmelCase : Optional[Any] ={v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase : Any =merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase : Optional[int] =[tuple(merge.split() ) for merge in merges]
UpperCAmelCase : List[str] =dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
UpperCAmelCase : str ={}
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        token = re.sub('''([.,!?()])''' , r''' \1''' , token )
        token = re.sub('''(\')''' , r''' \1 ''' , token )
        token = re.sub(r'''\s{2,}''' , ''' ''' , token )
        if "\n" in token:
            token = token.replace('''\n''' , ''' __newln__''' )
        tokens = token.split(''' ''' )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = '''@@ '''.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize( self , text ):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
| 348 | '''simple docstring'''
def z_function(input_str: str ) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int , z_result: list[int] , s: str ) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str , input_str: str ) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
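# Worked example: z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1], and
# find_pattern("aba", "abacaba") == 2 ("aba" starts at indices 0 and 4).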
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 267 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , feature_extractor , tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps)
    def __call__( self , *args , **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        audio = kwargs.pop("""audio""" , None)
        sampling_rate = kwargs.pop("""sampling_rate""" , None)
        text = kwargs.pop("""text""" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
        if text is not None:
            inputs = self.tokenizer(text , **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["""input_values"""]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["""padding_mask"""]
            return inputs
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        audio_values = kwargs.pop("""audio""" , None)
        padding_mask = kwargs.pop("""padding_mask""" , None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
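# Minimal usage sketch (illustrative only; the checkpoint name and the
# `generated_audio_values` tensor below are assumptions, not part of this file):
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")
#   audio = processor.batch_decode(generated_audio_values, padding_mask=inputs.get("padding_mask"))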
| 272 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
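# Minimal usage sketch (illustrative; `image` is any PIL image or numpy array):
#   image_processor = Swin2SRImageProcessor()
#   pixel_values = image_processor(image, return_tensors="np")["pixel_values"]
# With do_pad=True, height and width are each padded up to the next larger
# multiple of pad_size (8 by default) using symmetric padding.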
| 313 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 272 | 0 |
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of ``a`` modulo ``m`` via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
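# Worked example (checked by hand): gcd(3, 11) == 1 and 3 * 4 == 12 ≡ 1 (mod 11),
# so find_mod_inverse(3, 11) returns 4.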
| 63 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of ``sentence``, leaving the rest unchanged.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        return TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 310 | '''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Get raw characters from stdin without waiting for a newline."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a character from stdin, translating arrow-key escape sequences."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 272 | 0 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
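# The mapping built in stretch() is standard histogram equalization: sk
# accumulates the empirical CDF (each prk is n_k / N), and each intensity k is
# remapped to round((L - 1) * CDF(k)) with L = 256 grey levels.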
| 258 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 272 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 211 | '''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
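# Minimal usage sketch (illustrative; assumes TF weights for the checkpoint are
# available on the Hub):
#   model = TFMT5Model.from_pretrained("google/mt5-small")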
| 272 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Evaluate the Gaussian probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
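# Worked example: the standard normal density at the mean is 1 / sqrt(2 * pi),
# so gaussian(0) ≈ 0.3989, and gaussian(2, mu=2, sigma=3) equals
# 1 / sqrt(18 * pi) ≈ 0.1330.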
| 173 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
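# Minimal usage sketch (the override values below are hypothetical, chosen only
# to show how defaults can be changed):
#   config = ErnieMConfig(num_hidden_layers=6, hidden_size=384)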
| 272 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
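# Note on the two heads: `p_head` scores NSFW content and `w_head` scores
# watermark presence from the same CLIP image embedding; any image whose score
# exceeds the corresponding threshold (0.5 by default) is replaced with a
# black (all-zero) array of the same shape.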
| 217 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on the dev set to compare to benchmarks without having to submit to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
| 272 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
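# Worked example with the module's test data (merged values checked by hand):
#   merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
# yields the 16-node list
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10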
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 329 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 272 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end times and duration (in minutes) from a single workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 83 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 | 0 |
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000 x 1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number using binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negative values in a sorted grid using binary search on each row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negative values by checking every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negative values per row, breaking at the first one found."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
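# Why the binary-search version wins: each row is searched with
# find_negative_index over a bound that only shrinks from row to row, giving
# O(m log n) work for an m x n grid, versus O(m * n) for the brute-force scan.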
| 348 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def a__ ( a__ , a__ = "cpu" , a__ = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.load(_A , map_location=_A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_A , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
__SCREAMING_SNAKE_CASE = v.half()
if save_path is None: # overwrite src_path
__SCREAMING_SNAKE_CASE = src_path
torch.save(_A , _A )
if __name__ == "__main__":
fire.Fire(convert)
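# Example invocation via python-fire (file name and paths are illustrative):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path model_fp16.bin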
| 267 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3")
| 272 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID (DNI): 8 digits followed by a mod-23 checksum letter.

    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678z")  # lower case is accepted
    True
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
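    # Illustrative checks: "12345678Z" uses the classic example number whose
    # mod-23 checksum letter is indeed "Z" (LOOKUP_LETTERS[12345678 % 23]).
    assert is_spain_national_id("12345678Z")
    assert not is_spain_national_id("12345678A")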
| 313 | '''simple docstring'''
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of ``a`` mod ``m`` via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
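if __name__ == "__main__":
    # Quick sanity check of find_mod_inverse: the inverse of 7 mod 26 is 15,
    # since 7 * 15 == 105 == 4 * 26 + 1.
    assert find_mod_inverse(7, 26) == 15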
| 272 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 63 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal (Transformer-style) positional embeddings for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Projects sinusoidal time features through a two-layer MLP."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps ``get_sinusoidal_embeddings`` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
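if __name__ == "__main__":
    # Minimal shape check for the sketch above (PRNG seed value is arbitrary):
    # each of the 4 timesteps maps to a `dim`-sized sinusoidal feature vector.
    import jax

    _timesteps = jnp.arange(4, dtype=jnp.float32)
    _emb, _ = FlaxTimesteps(dim=32).init_with_output(jax.random.PRNGKey(0), _timesteps)
    print(_emb.shape)  # expected: (4, 32)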
| 272 | 0 |
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
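# Worked example of the WER formula documented above, using the docstring pairs:
#   ref "this is the reference" vs pred "this is the prediction":  S=1, D=0, I=0, hits=3
#   ref "there is another one"  vs pred "there is an other sample": S=2, D=0, I=1, hits=2
# corpus WER = (S + D + I) / (S + D + hits) = (3 + 0 + 1) / (3 + 0 + 5) = 4 / 8 = 0.5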
| 310 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 258 | '''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the least value of M (the longest cuboid side) such that the number of
    cuboids up to M x M x M with an integer shortest surface path exceeds ``limit``.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # count the distinct pairs (a, b) with a + b == sum_shortest_sides and a <= b <= max_cuboid_size
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
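# Spot check of the integer-path test above: the classic 6 x 5 x 3 cuboid has
# shortest surface path sqrt((5 + 3) ** 2 + 6 ** 2) = 10, an integer, so it is
# counted when max_cuboid_size == 6 and sum_shortest_sides == 8.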
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes (odd-only variant): all primes strictly below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)

    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 211 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 | 0 |
"""simple docstring"""
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to its base64 representation."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_bytes: bytes) -> str:
    """Decode base64-encoded bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
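    # Round-trip sanity check: decoding the encoding recovers the input
    assert decoded == test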
| 173 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 272 | 0 |
"""simple docstring"""
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( __SCREAMING_SNAKE_CASE=None ) -> Any:
if subparsers is not None:
__lowerCAmelCase: Union[str, Any] = subparsers.add_parser("tpu-config" , description=_description )
else:
__lowerCAmelCase: List[str] = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
__lowerCAmelCase: Dict = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=_A , default=_A , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_A , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_A , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
__lowerCAmelCase: Optional[Any] = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_A , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=_A )
return parser
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: List[str] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_A ):
__lowerCAmelCase: Optional[int] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__lowerCAmelCase: int = defaults.command_file
if not args.command and defaults.commands is not None:
__lowerCAmelCase: Tuple = defaults.commands
if not args.tpu_name:
__lowerCAmelCase: List[str] = defaults.tpu_name
if not args.tpu_zone:
__lowerCAmelCase: List[str] = defaults.tpu_zone
if args.accelerate_version == "dev":
__lowerCAmelCase: int = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
__lowerCAmelCase: Any = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , _A ):
__lowerCAmelCase: Optional[int] = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
__lowerCAmelCase: Any = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _A ):
__lowerCAmelCase: Dict = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__lowerCAmelCase: List[Any] = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
__lowerCAmelCase: Optional[Any] = "; ".join(_A )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__lowerCAmelCase: List[str] = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(_A )}" )
return
subprocess.run(_A )
print("Successfully setup pod." )
def a__ ( ) -> Dict:
__lowerCAmelCase: str = tpu_command_parser()
__lowerCAmelCase: Optional[int] = parser.parse_args()
tpu_command_launcher(_A )
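# Example invocation (the TPU name and zone values are hypothetical):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug
# With --debug set, the assembled `gcloud compute tpus tpu-vm ssh ...` command
# is printed instead of executed.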
| 217 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Row-normalize both embedding matrices, then return their cosine-similarity matrix."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 272 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the euclidean distance between two points
    >>> euclidean_distance([0, 0], [3, 4])
    5.0
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm:
    the k closest training points vote on the class of the query point.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
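# The demo above classifies a sepal/petal measurement vector. For the query
# point [4.4, 3.1, 1.3, 1.4] the 5 nearest training samples are, almost surely
# regardless of the random split, all of the first iris species, so the
# printed label is expected to be "setosa".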
| 329 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 272 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # after the character-level transform, jiwer's "word" error rate is the CER
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 83 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline: predicts the words that will follow a specified text prompt."""

    # Prefix text used to give Transformer-XL and XLNet style models more context for short prompts
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # TransfoXL expects an extra space before punctuation symbols
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
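

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the pipeline module itself);
    # the `pipeline` factory import and the "gpt2" checkpoint are assumptions here.
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])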
| 272 | 0 |
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
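    # Hand-checked: this prints 10 -- the number of ways to give persons 0, 1 and 2
    # pairwise-distinct tasks from {1, 3, 4}, {1, 2, 5} and {3, 4} respectively.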
| 348 | '''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """
    For the given string, computes for each index the length of the longest
    substring starting at that index that is also a prefix of the whole string.
    The value for the first element is always 0.

    >>> z_function("abracadabra")
    [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    >>> z_function("aaaa")
    [0, 3, 2, 1]
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if the match at position ``i`` can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """
    Count occurrences of ``pattern`` in ``input_str`` using the z-function.

    >>> find_pattern("abr", "abracadabra")
    2
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
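
    # Quick illustration (hand-checked): "abr" occurs twice in "abracadabra",
    # at indices 0 and 7, so this prints 2.
    print(find_pattern("abr", "abracadabra"))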
| 272 | 0 |
"""
Processor class for SAM.
"""
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor and the handling of 2D prompt points, labels and
    bounding boxes into a single processor.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Uses the image processor to prepare the image(s) as well as the 2D points, labels and bounding boxes for the
        model.
        """
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """
        Pads the 2D points and labels to the maximum number of points in the batch, using `self.point_pad_value`.
        """
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        """
        Checks and preprocesses the 2D points, labels and bounding boxes. If a user passes a tensor directly, it is
        converted to a numpy array first.
        """
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
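

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module); the
    # "facebook/sam-vit-base" checkpoint and the blank demo image are assumptions.
    from PIL import Image

    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    image = Image.new("RGB", (512, 512))
    # one image, one (x, y) prompt point with a positive label
    inputs = processor(image, input_points=[[[256, 256]]], input_labels=[[1]], return_tensors="pt")
    print(inputs["input_points"].shape)  # coordinates are rescaled to the 1024-pixel longest edge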
| 267 | '''simple docstring'''
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to the feature extractor's `__call__` and the `text` argument to the
        tokenizer's `__call__`.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        Decodes either batches of audio outputs from the MusicGen model, or batches of token ids from the tokenizer.
        """
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `decode`.
        """
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """
        Strips any padding from the audio values to return a list of numpy audio arrays.
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
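

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module); the
    # "facebook/musicgen-small" checkpoint is an assumption.
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
    inputs = processor(text=["80s pop track with bassy drums and synth"], padding=True, return_tensors="pt")
    print(inputs.keys())  # input_ids and attention_mask from the T5 tokenizer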
| 272 | 0 |
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials")
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 313 | '''simple docstring'''
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
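

if __name__ == "__main__":
    # Minimal usage sketch mirroring the slow test above (illustrative; this
    # downloads the "google/ddpm-cifar10-32" weights used in the integration test).
    pipe = PNDMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"), scheduler=PNDMScheduler())
    sample = pipe(generator=torch.manual_seed(0), output_type="numpy").images
    print(sample.shape)  # (1, 32, 32, 3)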
| 272 | 0 |
"""Fast tokenization classes for RoBERTa."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'roberta-base': 512,
    'roberta-large': 512,
    'roberta-large-mnli': 512,
    'distilroberta-base': 512,
    'roberta-base-openai-detector': 512,
    'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
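

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module); the
    # "roberta-base" checkpoint is an assumption.
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    encoded = tokenizer("Hello world")
    print(encoded["input_ids"])  # starts with <s> (id 0) and ends with </s> (id 2)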
| 63 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or a word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 272 | 0 |
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
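# Hand-checked: for the word list above, autocomplete_using_trie("de") yields
# completions built from "depart", "detergent", "deer" and "deal"; each entry
# carries a trailing space because _elements maps the END marker to " ".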
| 310 | '''simple docstring'''
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
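

if __name__ == "__main__":
    # Minimal usage sketch (illustrative): read a single key press and report
    # whether it was the up arrow.
    pressed = get_character()
    if pressed == chr(KEYMAP["up"]):
        print("up arrow")
    else:
        print(repr(pressed))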
| 272 | 0 |
""" Testing suite for the TensorFlow DeiT model. """


from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 258 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Whisper feature extractor. This feature extractor extracts mel-filter bank features from raw speech
    using a custom numpy implementation of the short-time Fourier transform (STFT).
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        """
        Compute the log-mel spectrogram of the provided audio.
        """
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several audio sequence(s).
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
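

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module); the
    # one-second dummy waveform stands in for real 16 kHz mono audio.
    feature_extractor = WhisperFeatureExtractor()
    dummy_audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
    features = feature_extractor(dummy_audio, sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000): 80 mel bins x 30 s of frames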
| 272 | 0 |
"""
Project Euler problem 25: find the index of the first term in the Fibonacci
sequence to contain n digits.
"""


def fibonacci(n: int) -> int:
    """
    Computes the Fibonacci number for input n by iterating through n numbers
    and creating an array of ints using the Fibonacci formula. Returns the nth
    element of the array.

    >>> fibonacci(10)
    55
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """
    Computes the index of the first Fibonacci number with n digits.

    >>> fibonacci_digits_index(3)
    12
    """
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """
    Returns the index of the first term in the Fibonacci sequence to contain
    n digits.

    >>> solution(3)
    12
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 211 | '''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    """
    This class overrides `TFT5Model`. Please check the superclass for the appropriate documentation alongside usage
    examples.
    """

    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    """
    This class overrides `TFT5ForConditionalGeneration`. Please check the superclass for the appropriate
    documentation alongside usage examples.
    """

    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    """
    This class overrides `TFT5EncoderModel`. Please check the superclass for the appropriate documentation alongside
    usage examples.
    """

    model_type = "mt5"
    config_class = MT5Config
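

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module); the
    # "google/mt5-small" checkpoint is an assumption.
    model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
    print(model.config.model_type)  # "mt5"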
| 272 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # popped inside the processor, so drop it here too
        input_feat_extract.pop("reshaped_input_sizes")  # popped inside the processor, so drop it here too

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # malformed mask input should raise
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # popped inside the processor, so drop it here too
        input_feat_extract.pop("reshaped_input_sizes")  # popped inside the processor, so drop it here too

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # malformed mask input should raise
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
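Outside the test harness, the same post-processing call can be used directly. A minimal sketch follows; "facebook/sam-vit-base" is a real public checkpoint, but the dummy mask size is illustrative, and the arguments are passed positionally exactly as in the tests above (the default return path requires torch):

```python
# A minimal sketch of SamProcessor.post_process_masks outside the test harness.
import numpy as np
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
low_res_masks = [np.ones((1, 3, 256, 256), dtype=np.float32)]  # dummy model output
masks = processor.post_process_masks(low_res_masks, [[1764, 2646]], [[683, 1024]])
print(masks[0].shape)  # expected (1, 3, 1764, 2646), upscaled to the original image size
```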
| 173 | '''simple docstring'''
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ErnieM model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
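A minimal construction sketch for the config above; the keyword arguments simply override the defaults, and `attribute_map` is what routes the aliased attribute names (the round-trip uses the standard `PretrainedConfig` dict helpers):

```python
# A minimal sketch: build a config, override two defaults, and round-trip it.
config = ErnieMConfig(hidden_size=512, num_hidden_layers=6)
print(config.model_type)   # "ernie_m"
print(config.num_classes)  # attribute_map routes this alias to num_labels

config_dict = config.to_dict()
restored = ErnieMConfig.from_dict(config_dict)
assert restored.hidden_size == 512
```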
| 272 | 0 |