| code (string, length 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, length 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Returns (base ** exponent) % modulo_value via exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Returns the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 69 |
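A quick sanity check for the snippet above; this is a sketch that assumes the deobfuscated names `_modexpt` and `solution` from the fix, and relies only on Python's built-in three-argument `pow`:

```python
# For bases smaller than the modulus, _modexpt must agree with built-in
# modular exponentiation.
for b, e, m in [(3, 7, 1000), (2, 64, 97), (1777, 1855, 10**8)]:
    assert _modexpt(b, e, m) == pow(b, e, m)
```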
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 263 | 0 |
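Outside a test harness, `TextIteratorStreamer` is normally drained while `generate` runs in a background thread. A rough usage sketch (the tiny checkpoint is illustrative; the kwargs mirror the `generate` arguments exercised in the tests above):

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks until done, so it runs in a thread while we drain the stream.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()
```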
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of `input_string` over a rail-fence grid of height `key`."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recreates the rail-fence grid from the ciphertext and reads it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses `decrypt` with every possible key and returns the candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 182 |
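A round-trip sketch for the cipher above; the plaintext and key are arbitrary, and `bruteforce` should recover the plaintext at the true key:

```python
ciphertext = encrypt("WE ARE DISCOVERED. FLEE AT ONCE", 3)
print(ciphertext)              # characters read off the grid row by row
print(decrypt(ciphertext, 3))  # 'WE ARE DISCOVERED. FLEE AT ONCE'
assert bruteforce(ciphertext)[3] == "WE ARE DISCOVERED. FLEE AT ONCE"
```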
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """Pure-Python implementation of the gnome sort algorithm."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted)) | 182 | 1 |
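Gnome sort walks the list like insertion sort but backtracks one position after each swap, so it is O(n^2) in the worst case. A minimal check of the fixed function:

```python
assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []
assert gnome_sort([1]) == [1]
```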
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
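The `_LazyModule` pattern above defers importing heavy submodules until one of their names is first accessed. A stripped-down sketch of the idea with hypothetical names (not the actual `transformers` implementation):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Loads the submodule that defines a name only when that name is accessed."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires at most once per name
        return value
```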
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 66 | 1 |
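Both tests follow the standard JAX data-parallel recipe: `replicate` copies the parameters onto every device, `shard` splits the batch across devices, and the `jit=True` call runs the pipeline under `pmap`. A toy sketch of the same pattern, unrelated to diffusers and assuming the batch size divides the device count:

```python
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard


def scale(params, batch):
    # toy "model": multiply each example by a shared scalar parameter
    return params["w"] * batch


params = {"w": jnp.float32(2.0)}
batch = jnp.arange(8.0).reshape(8, 1)  # 8 examples, split evenly across devices

p_apply = jax.pmap(scale)  # compile once, run on every device
outputs = p_apply(replicate(params), shard(batch))
print(outputs.shape)  # (num_devices, 8 // num_devices, 1)
```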
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 356 |
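A rough usage sketch for the processor above. It assumes a LayoutLMv2-style image processor with `apply_ocr=True` (the default, which needs Tesseract installed) and an illustrative checkpoint name:

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")

# With apply_ocr=True, words and boxes come from the built-in OCR,
# so only the image is required.
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, image
```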
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 236 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
A__ = TypeVar("""KT""")
A__ = TypeVar("""VT""")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 82 |
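Day-to-day use of the structure reduces to `insert`, `find`, and `delete`, each O(log n) on average thanks to the randomized node levels. A short sketch against the fixed classes above:

```python
sl = SkipList()          # effectively SkipList[str, int] here
sl.insert("alpha", 1)
sl.insert("beta", 2)
sl.insert("alpha", 10)   # re-inserting a key overwrites its value

print(sl.find("alpha"))  # 10
sl.delete("beta")
print(sl.find("beta"))   # None
print(list(sl))          # keys come back in sorted order: ['alpha']
```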
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
| 335 | 0 |
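Malus's law is I = I_0 * cos^2(theta), so at theta = 60 degrees the transmitted intensity is a quarter of the input. A worked check of the function above:

```python
print(malus_law(100.0, 60))  # ~25.0 (25.000000000000004 with floating point)
print(malus_law(100.0, 0))   # 100.0 -- aligned polarizers transmit everything
print(malus_law(100.0, 90))  # ~0.0  -- crossed polarizers block the beam
```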
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)

        self.tokenizer = tokenizer
def lowercase_ ( self : Union[str, Any] ):
a : str = '<pad>'
a : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowercase_ ( self : Optional[Any] ):
a : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(__snake_case ) , 10_11_22 )
def lowercase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def lowercase_ ( self : List[str] ):
a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a : Union[str, Any] = [0, 57, 30_18, 7_03_07, 91, 2]
a : Union[str, Any] = self.tokenizer(
__snake_case , max_length=len(__snake_case ) , padding=__snake_case , truncation=__snake_case , return_tensors='pt' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
a : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
a : Any = self.get_tokenizer()
a : Optional[int] = self.get_rust_tokenizer()
a : int = 'I was born in 92000, and this is falsé.'
a : int = tokenizer.tokenize(__snake_case )
a : Dict = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
a : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
a : Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
a : str = self.get_rust_tokenizer()
a : Any = tokenizer.encode(__snake_case )
a : Tuple = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
# fmt: off
a : Optional[int] = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
a : Any = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=__snake_case , ) | 96 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def lowercase_ ( self : int ):
super().setUp()
a : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Any , __snake_case : str ):
a : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。'
a : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowercase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file )
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(__snake_case )
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[Any] = pickle.load(__snake_case )
a : Tuple = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Dict ):
a : List[str] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : List[Any] ):
try:
a : int = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Any ):
try:
a : Union[str, Any] = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : str ):
a : Tuple = MecabTokenizer(do_lower_case=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Union[str, Any] ):
try:
a : Any = MecabTokenizer(
do_lower_case=__snake_case , normalize_text=__snake_case , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase_ ( self : List[Any] ):
a : Dict = MecabTokenizer(normalize_text=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase_ ( self : str ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(__snake_case )
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[int] = pickle.load(__snake_case )
a : List[Any] = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_sudachi
def lowercase_ ( self : List[Any] ):
a : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Any ):
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase_ ( self : Dict ):
a : Optional[int] = SudachiTokenizer(do_lower_case=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Tuple ):
a : int = SudachiTokenizer(normalize_text=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = SudachiTokenizer(trim_whitespace=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(__snake_case )
a : str = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : List[str] = pickle.load(__snake_case )
a : Any = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : List[Any] = JumanppTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : List[Any] = JumanppTokenizer(normalize_text=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : str = JumanppTokenizer(trim_whitespace=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase_ ( self : Tuple ):
a : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase_ ( self : Any ):
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Optional[int] = {}
for i, token in enumerate(__snake_case ):
a : Dict = i
a : Optional[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
a : List[Any] = tokenizer.subword_tokenizer
a : List[str] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(__snake_case , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
a : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(__snake_case , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
a : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : str = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def lowercase_ ( self : List[Any] ):
super().setUp()
a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Optional[Any] , **__snake_case : List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **__snake_case )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
a : int = 'こんにちは、世界。 \nこんばんは、世界。'
a : Optional[Any] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase_ ( self : str ):
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
a : Tuple = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
__snake_case , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase_ ( self : Any ):
a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Optional[Any] = {}
for i, token in enumerate(__snake_case ):
a : Tuple = i
a : Optional[int] = CharacterTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
a : Optional[int] = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
a : List[Any] = 'cl-tohoku/bert-base-japanese'
a : Dict = AutoTokenizer.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
class a__( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) ) | 96 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 107 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Tuple = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCamelCase, 'width_multiplier'))
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=64, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase="swish", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, lowerCamelCase=0.2_5, lowerCamelCase=0.0, lowerCamelCase=0.0, ) -> Any:
"""simple docstring"""
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : str = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[Any] = make_divisible(5_12 * width_multiplier, divisor=8)
_lowercase : str = hidden_act
_lowercase : Dict = conv_kernel_size
_lowercase : int = output_stride
_lowercase : Optional[Any] = classifier_dropout_prob
_lowercase : Tuple = use_labels
_lowercase : int = is_training
_lowercase : Optional[Any] = num_labels
_lowercase : Dict = initializer_range
_lowercase : List[str] = scope
_lowercase : Tuple = width_multiplier
_lowercase : List[str] = ffn_dropout
_lowercase : Dict = attn_dropout
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Dict = None
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels)
_lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : int = self.num_labels
_lowercase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
_lowercase : List[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self) -> Any:
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config( self) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds( self) -> Optional[Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes( self) -> Optional[Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs( self) -> List[Any]:
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward( self) -> int:
        """simple docstring"""
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small( self) -> List[Any]:
        """simple docstring"""
        pass
    def test_forward_signature( self) -> Tuple:
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model( self) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output( self) -> List[str]:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification( self) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation( self) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self) -> List[str]:
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Dict:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self) -> List[str]:
        """simple docstring"""
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self) -> Tuple:
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1E-4))
    @slow
    def test_inference_semantic_segmentation( self) -> Optional[Any]:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
                [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
                [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
    @slow
    def test_post_processing_semantic_segmentation( self) -> str:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
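# --- Illustrative usage (added commentary, not part of the original test file) ---
# A minimal inference sketch for the checkpoint exercised above, assuming the
# standard `transformers` MobileViTV2 class names:
#
#     from PIL import Image
#     import torch
#     from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification
#
#     processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#     model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     with torch.no_grad():
#         logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(model.config.id2label[int(logits.argmax(-1))])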
| 21 | 0 |
'''simple docstring'''
from math import sqrt
def is_prime(number : int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth : int = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
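# Quick sanity check (illustrative comment, not in the original): the primes
# run 2, 3, 5, 7, 11, 13, ..., so solution(1) == 2 and solution(6) == 13,
# while the default solution() returns the 10001st prime.
#
#     assert solution(1) == 2
#     assert solution(6) == 13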
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id ,safety_checker=None )
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt ,init_image ,mask_image )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed ,jax.device_count() )
        prompt_ids = shard(prompt_ids )
        processed_masked_images = shard(processed_masked_images )
        processed_masks = shard(processed_masks )
        output = pipeline(
            prompt_ids ,processed_masks ,processed_masked_images ,params ,prng_seed ,num_inference_steps ,jit=True )
        images = output.images.reshape(num_samples ,512 ,512 ,3 )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
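# A small sketch of the sharding used above (illustrative, not part of the
# original test): `replicate` copies the params once per local device, while
# `shard` splits the leading batch dimension across devices, turning an array
# of shape (num_devices * n, ...) into (num_devices, n, ...):
#
#     import jax, jax.numpy as jnp
#     from flax.training.common_utils import shard
#
#     batch = jnp.zeros((jax.device_count() * 2, 77))
#     print(shard(batch).shape)  # (jax.device_count(), 2, 77)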
| 5 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1_0_2_4,
}
def load_vocab( vocab_file ):
    vocab = collections.OrderedDict()
    with open(vocab_file ,"""r""" ,encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    """simple docstring"""
    def __init__(self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_00 ) -> None:
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self , token ) -> Optional[int]:
        """simple docstring"""
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = """""".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__(self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ) -> None:
        """simple docstring"""
        requires_backends(self , ["""jieba"""] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id(self ) -> int:
        """simple docstring"""
        return self.encoder[self.bod_token]
    @property
    def eod_token_id(self ) -> int:
        """simple docstring"""
        return self.encoder[self.eod_token]
    @property
    def newline_id(self ) -> int:
        """simple docstring"""
        return self.encoder["\n"]
    @property
    def vocab_size(self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    def get_vocab(self ) -> int:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize(self , text ) -> str:
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode(self , token_ids , **kwargs ) -> Dict:
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check(self , token ) -> List[str]:
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string(self , tokens ) -> str:
        """simple docstring"""
        return "".join(tokens )
    def _convert_token_to_id(self , token ) -> Optional[int]:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ) -> Tuple:
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[""" """]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["""\n"""]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
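# The WordpieceTokenizer above segments greedily, longest-match-first: from
# each `start` it tries the longest remaining substring and shrinks `end`
# until it finds a vocabulary hit. A tiny illustrative run with a made-up
# vocabulary (hypothetical values, not from the real CPM-Ant vocab):
#
#     tokenizer = WordpieceTokenizer(vocab={"un": 0, "want": 1, "ed": 2}, unk_token="<unk>")
#     print(tokenizer.tokenize("unwanted"))  # ['un', 'want', 'ed']
#     print(tokenizer.tokenize("xy"))        # ['<unk>', '<unk>'] -- per-character fallback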
| 25 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    """simple docstring"""
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                " padding.." )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        """simple docstring"""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
    def _get_lr_scheduler( self , num_training_steps ):
        """simple docstring"""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        """simple docstring"""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        """simple docstring"""
        labels = inputs.pop("labels" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        """simple docstring"""
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["max_length"] )
        labels = inputs.pop("labels" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["max_length"] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        """simple docstring"""
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
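# The weight-decay grouping in create_optimizer_and_scheduler above is a
# common pattern worth seeing in isolation. A standalone sketch with a toy
# model (illustrative values only):
#
#     import torch
#     from torch.optim import AdamW
#
#     model = torch.nn.Linear(4, 2)
#     no_decay = ["bias", "LayerNorm.weight"]
#     grouped = [
#         {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
#          "weight_decay": 0.01},
#         {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
#          "weight_decay": 0.0},
#     ]
#     optimizer = AdamW(grouped, lr=3e-4)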
| 108 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self ,**kwargs ):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,)
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        '''simple docstring'''
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample ,t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual ,t ,sample ,generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9_606 ) < 1e-2
        assert abs(result_mean.item() - 0.3_372 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample ,t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual ,t ,sample ,generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0_296 ) < 1e-2
        assert abs(result_mean.item() - 0.2_631 ) < 1e-3
    def test_custom_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_00, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t ,expected_prev_t )
    def test_custom_timesteps_increasing_order( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_00, 87, 50, 51, 0]
        with self.assertRaises(ValueError ,msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_00, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError ,msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps ,timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError ,msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' ,):
            scheduler.set_timesteps(timesteps=timesteps )
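# For context, the denoising loop these tests exercise looks roughly like the
# sketch below (illustrative; the zero tensor stands in for a trained UNet's
# noise prediction):
#
#     import torch
#     from diffusers import DDPMScheduler
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         noise_pred = torch.zeros_like(sample)  # stand-in for the model's prediction
#         sample = scheduler.step(noise_pred, t, sample).prev_sample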
| 330 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir ) -> None:
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / '''foo.lock''' ) )
    lock2 = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path(tmpdir ) -> None:
    """simple docstring"""
    filename = '''a''' * 1_0_0_0 + '''.lock'''
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith('''.lock''' )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 2_5_5
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
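# Typical usage of the lock under test (illustrative): the context manager
# serializes access to a shared resource across processes.
#
#     from datasets.utils.filelock import FileLock
#
#     with FileLock("shared_resource.lock"):
#         pass  # critical section; a second process blocks here until release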
| 330 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.3_14_45_98
def rms_speed_of_molecule( temperature , molar_mass ):
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 3_00
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 118 | """simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline( ChunkPipeline):
    def __init__( self , **kwargs ) -> Dict:
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image , candidate_labels = None , **kwargs , ) -> List[str]:
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ) -> Dict:
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params
    def preprocess( self , inputs ) -> Dict:
        image = load_image(inputs['image'] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ) -> List[Any]:
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )
        outputs = self.model(**model_inputs )
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ) -> Union[str, Any]:
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box ) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
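# Example usage of this pipeline (illustrative; assumes an OWL-ViT checkpoint
# such as "google/owlvit-base-patch32", which supports zero-shot detection):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     preds = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., ...}}, ...]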
| 77 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT =1.0_5457_1817E-34 # unit of ℏ : J * s
SPEED_OF_LIGHT =3E8 # unit of c : m * s^-1
def casimir_force( force : float , area : float , distance : float )-> dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
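# A worked call (illustrative): solving for the force between 1 cm^2 plates
# separated by 1 micrometre. Exactly one argument must be zero.
#
#     print(casimir_force(force=0, area=1e-4, distance=1e-6))
#     # {'force': ...} -- about 1.3e-7 N for these values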
| 368 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
A_ : Union[str, Any] =logging.get_logger(__name__)
class DPTFeatureExtractor( DPTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 80 | 0 |
def binary_and( a : int ,b : int ):
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) ,len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) ,b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
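# Illustrative checks (doctest-style, added for clarity): 25 = 0b011001 and
# 32 = 0b100000 share no set bits, while 37 & 50 keeps only the top bit.
#
#     >>> binary_and(25, 32)
#     '0b000000'
#     >>> binary_and(37, 50)
#     '0b100000'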
| 103 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A : Union[str, Any] = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests( unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        text_classifier = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
        outputs = text_classifier("This is great !" , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}] )
        outputs = text_classifier(["This is great !", "This is bad"] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
                [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
            ] , )
        outputs = text_classifier("This is great !" , top_k=1 )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
        # Legacy behavior
        outputs = text_classifier("This is great !" , return_all_scores=False )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
        outputs = text_classifier("This is great !" , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}]] )
        outputs = text_classifier(["This is great !", "Something else"] , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
                [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
            ] , )
        outputs = text_classifier(["This is great !", "Something else"] , return_all_scores=False )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"label": "LABEL_0", "score": 0.5_0_4},
                {"label": "LABEL_0", "score": 0.5_0_4},
            ] , )
    @require_torch
    def test_accepts_torch_device( self ):
        '''simple docstring'''
        import torch
        text_classifier = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
    @require_tf
    def test_small_model_tf( self ):
        '''simple docstring'''
        text_classifier = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
    @slow
    @require_torch
    def test_pt_bert( self ):
        '''simple docstring'''
        text_classifier = pipeline("text-classification" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "POSITIVE", "score": 1.0}] )
        outputs = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "NEGATIVE", "score": 1.0}] )
        outputs = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "POSITIVE", "score": 0.9_8_8}] )
    @slow
    @require_tf
    def test_tf_bert( self ):
        '''simple docstring'''
        text_classifier = pipeline("text-classification" , framework="tf" )
        outputs = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "POSITIVE", "score": 1.0}] )
        outputs = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "NEGATIVE", "score": 1.0}] )
        outputs = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(outputs ) , [{"label": "POSITIVE", "score": 0.9_8_8}] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test( self , text_classifier , _ ):
        '''simple docstring'''
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"label": ANY(str ), "score": ANY(float )}] )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values() )
        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"label": ANY(str ), "score": ANY(float )}, {"label": ANY(str ), "score": ANY(float )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"label": ANY(str ), "score": ANY(float )}] * N, [{"label": ANY(str ), "score": ANY(float )}] * N] , )
        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"label": ANY(str ), "score": ANY(float )} , )
        self.assertTrue(outputs["label"] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"label": ANY(str ), "score": ANY(float )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values() )
| 184 | 0 |
'''simple docstring'''
def match_pattern( input_string : str , pattern : str ) -> bool:
    """simple docstring"""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
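# Illustrative sanity checks (doctest-style, added for clarity):
#
#     >>> match_pattern("aab", "c*a*b")
#     True
#     >>> match_pattern("abc", "a.c")
#     True
#     >>> match_pattern("aa", "a")
#     False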
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
UpperCAmelCase = 'aab'
UpperCAmelCase = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
    print(F'''{input_string} does not match with the given pattern {pattern}''')
 | 368 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    '''simple docstring'''
    def __init__( self , key = 0 ) -> None:
        self.__key = key
    def encrypt( self , content , key ) -> list[str]:
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ) -> list[str]:
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ) -> str:
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ) -> str:
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful") | 187 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    '''simple docstring'''
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__( self , *args , **kwargs ) -> int:
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ) -> Union[str, Any]:
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
| 48 |
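# A minimal usage sketch for the text-generation pipeline above, assuming the
# standard `transformers.pipeline` factory; the checkpoint and prompt here are
# illustrative, not taken from this file.
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")

# `return_full_text=False` maps to ReturnType.NEW_TEXT in _sanitize_parameters,
# so only the newly generated continuation is returned.
outputs = generator("Hello, my dog", max_new_tokens=5, return_full_text=False)
print(outputs[0]["generated_text"])

# `handle_long_generation="hole"` makes preprocess() truncate the prompt from the
# left so that prompt + new tokens still fit inside the model's maximum length.
outputs = generator("Hello, my dog", max_new_tokens=5, handle_long_generation="hole")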
import random


def _partition(data, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 48 | 1 |
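# A small usage sketch for the `quick_select` above: the k-th smallest element
# is found in expected O(n) time without fully sorting. The `median` helper is
# an illustrative wrapper, not part of the original file.
def median(items):
    mid = len(items) // 2
    if len(items) % 2 == 1:
        return quick_select(items, mid)
    # even length: average the two middle order statistics
    return (quick_select(items, mid - 1) + quick_select(items, mid)) / 2


print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))  # -> 54 (the 6th smallest)
print(median([2, 4, 5, 7, 899, 54, 32]))           # -> 7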
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    '''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    '''simple docstring'''
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 356 |
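# A minimal sketch of how `notebook_launcher` is typically called from a
# notebook, following the pattern in the Accelerate docs; `training_function`
# is an illustrative stand-in for a real training loop.
from accelerate import Accelerator


def training_function(mixed_precision="no"):
    accelerator = Accelerator(mixed_precision=mixed_precision)
    accelerator.print(f"Process {accelerator.process_index} of {accelerator.num_processes}")


# Spawns one process per device via fork; the Accelerator must only be created
# inside `training_function`, as the shared-state checks above enforce.
notebook_launcher(training_function, args=("no",), num_processes=2)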
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    '''simple docstring'''

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    '''simple docstring'''

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    '''simple docstring'''

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 259 | 0 |
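# The queues above use `list.pop(0)`, which is O(n) per dequeue. A sketch of
# the same frontier expansion with `collections.deque`, whose `popleft` is
# O(1); this is an optimization note, not part of the original file.
from collections import deque

frontier = deque([(0, 0)])
visited = {(0, 0)}
while frontier:
    y, x = frontier.popleft()
    for dy, dx in delta:  # `delta` as defined above
        ny, nx = y + dy, x + dx
        if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0 and (ny, nx) not in visited:
            visited.add((ny, nx))
            frontier.append((ny, nx))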
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 103 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 283 | 0 |
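# A short usage sketch for the processor exercised by the tests above, using
# the same public "google/owlvit-base-patch32" checkpoint; the blank images
# are illustrative. Nested text queries are padded per image and flattened to
# (batch_size * num_max_text_queries, seq_length), which is what the batched
# test asserts.
import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
images = [Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8)) for _ in range(2)]
inputs = processor(text=[["cat", "nasa badge"], ["person"]], images=images, return_tensors="pt")
print(inputs["input_ids"].shape)  # expected (4, 16), matching the test above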
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    '''simple docstring'''
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    '''simple docstring'''
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    '''simple docstring'''
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 185 |
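# The functions above build their query string via `params=locals()`, so the
# parameter names (`q`, `appid`, `lat`, `lon`) must match the OpenWeatherMap
# query keys exactly. An equivalent sketch with explicit params and basic
# error handling; the API key is a placeholder.
import requests


def current_weather_explicit(location: str, appid: str) -> dict:
    response = requests.get(
        "https://api.openweathermap.org/data/2.5/weather",
        params={"q": location, "appid": appid},
        timeout=10,
    )
    response.raise_for_status()  # surface HTTP errors instead of returning error JSON
    return response.json()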
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 | 1 |
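# A worked call of `jacobi_iteration_method` on a strictly diagonally dominant
# 3x3 system; the inputs are illustrative but satisfy every validation check
# above (|a_ii| > sum of the other entries in each row).
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]

# Each sweep computes x_i = (b_i - sum_{j != i} a_ij * x_j) / a_ii from the
# previous iterate, which is exactly the inner loop above.
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))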
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def a__ ( A_, A_ ):
'''simple docstring'''
__magic_name__ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__magic_name__ = """resnet101"""
if "dc5" in model_name:
__magic_name__ = True
__magic_name__ = """panoptic""" in model_name
if is_panoptic:
__magic_name__ = 250
else:
__magic_name__ = 91
__magic_name__ = """huggingface/label-files"""
__magic_name__ = """coco-detection-id2label.json"""
__magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) )
__magic_name__ = {int(A_ ): v for k, v in idalabel.items()}
__magic_name__ = idalabel
__magic_name__ = {v: k for k, v in idalabel.items()}
# load image processor
__magic_name__ = """coco_panoptic""" if is_panoptic else """coco_detection"""
__magic_name__ = ConditionalDetrImageProcessor(format=A_ )
# prepare image
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A_, return_tensors="""pt""" )
__magic_name__ = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
__magic_name__ = torch.hub.load("""DeppMeng/ConditionalDETR""", A_, pretrained=A_ ).eval()
__magic_name__ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__magic_name__ = """conditional_detr.""" + src
rename_key(A_, A_, A_ )
__magic_name__ = rename_backbone_keys(A_ )
# query, key and value matrices need special treatment
read_in_q_k_v(A_, is_panoptic=A_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__magic_name__ = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
# finally, create HuggingFace model and load state dict
__magic_name__ = ConditionalDetrForSegmentation(A_ ) if is_panoptic else ConditionalDetrForObjectDetection(A_ )
model.load_state_dict(A_ )
model.eval()
model.push_to_hub(repo_id=A_, organization="""DepuMeng""", commit_message="""Add model""" )
# verify our conversion
__magic_name__ = conditional_detr(A_ )
__magic_name__ = model(A_ )
assert torch.allclose(outputs.logits, original_outputs["""pred_logits"""], atol=1e-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1e-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
image_processor.save_pretrained(A_ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__lowerCAmelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 88 |
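# The `read_in_q_k_v` helper above splits PyTorch's fused attention projection
# into separate q/k/v tensors. A standalone sketch of the same slicing with a
# hidden size of 256, matching the 256-wide slices used in the script.
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv, as in nn.MultiheadAttention
in_proj_bias = torch.randn(3 * hidden_size)

q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]
q_b, k_b, v_b = in_proj_bias[:256], in_proj_bias[256:512], in_proj_bias[-256:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)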
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : List[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : List[str] = 'instructblip_vision_model'
def __init__( self :List[str] , _A :str=1_408 , _A :List[str]=6_144 , _A :List[Any]=39 , _A :Optional[Any]=16 , _A :Tuple=224 , _A :Tuple=14 , _A :Tuple="gelu" , _A :Optional[Any]=1E-6 , _A :List[Any]=0.0 , _A :Dict=1E-10 , _A :List[str]=True , **_A :Dict , ) -> Dict:
'''simple docstring'''
super().__init__(**_A )
__A = hidden_size
__A = intermediate_size
__A = num_hidden_layers
__A = num_attention_heads
__A = patch_size
__A = image_size
__A = initializer_range
__A = attention_dropout
__A = layer_norm_eps
__A = hidden_act
__A = qkv_bias
@classmethod
def lowercase_ ( cls :Any , _A :Union[str, os.PathLike] , **_A :Tuple ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_A )
__A , __A = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__A = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : List[str] = 'instructblip_qformer'
def __init__( self :Tuple , _A :int=30_522 , _A :List[str]=768 , _A :str=12 , _A :Optional[Any]=12 , _A :Union[str, Any]=3_072 , _A :str="gelu" , _A :Tuple=0.1 , _A :Dict=0.1 , _A :Dict=512 , _A :Union[str, Any]=0.02 , _A :int=1E-12 , _A :str=0 , _A :Union[str, Any]="absolute" , _A :List[str]=2 , _A :Optional[Any]=1_408 , **_A :Any , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = hidden_act
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = initializer_range
__A = layer_norm_eps
__A = position_embedding_type
__A = cross_attention_frequency
__A = encoder_hidden_size
@classmethod
def lowercase_ ( cls :int , _A :Union[str, os.PathLike] , **_A :int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_A )
__A , __A = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__A = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : Any = 'instructblip'
UpperCAmelCase__ : List[Any] = True
def __init__( self :Dict , _A :int=None , _A :Optional[Any]=None , _A :Optional[Any]=None , _A :Optional[Any]=32 , **_A :List[Any] ) -> Tuple:
'''simple docstring'''
super().__init__(**_A )
if vision_config is None:
__A = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__A = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__A = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__A = InstructBlipVisionConfig(**_A )
__A = InstructBlipQFormerConfig(**_A )
__A = text_config['model_type'] if 'model_type' in text_config else 'opt'
__A = CONFIG_MAPPING[text_model_type](**_A )
__A = self.text_config.tie_word_embeddings
__A = self.text_config.is_encoder_decoder
__A = num_query_tokens
__A = self.vision_config.hidden_size
__A = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__A = 1.0
__A = 0.02
@classmethod
def lowercase_ ( cls :int , _A :InstructBlipVisionConfig , _A :InstructBlipQFormerConfig , _A :PretrainedConfig , **_A :Any , ) -> Any:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
__A = copy.deepcopy(self.__dict__ )
__A = self.vision_config.to_dict()
__A = self.qformer_config.to_dict()
__A = self.text_config.to_dict()
__A = self.__class__.model_type
return output
| 161 | 0 |
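# A sketch of composing the nested config above from its parts via the
# `from_vision_qformer_text_configs` classmethod; the OPT text config is the
# default fallback the code mentions, and all values here are defaults.
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, OPTConfig

vision_config = InstructBlipVisionConfig()
qformer_config = InstructBlipQFormerConfig()
text_config = OPTConfig()

config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
print(config.num_query_tokens)  # 32, the default set in __init__ above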
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
    print(f"""{solution() = }""")
 | 356 |
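# The primality test above relies on every prime > 3 having the form 6k ± 1;
# a quick empirical check of that claim, plus a small run of the generator.
for p in [5, 7, 11, 13, 17, 19, 23, 29]:
    assert p % 6 in (1, 5), p  # 6k + 1 or 6k - 1 (i.e. 6k + 5)

print(sum(takewhile(lambda x: x < 100, prime_generator())))  # 1060: sum of primes below 100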
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
 | 190 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 226 |
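# The `_LazyModule` pattern above defers heavy imports until first attribute
# access. A minimal standalone sketch of the same idea using module-level
# __getattr__ (PEP 562); the attribute-to-submodule mapping is illustrative.
import importlib

_LAZY_ATTRS = {"IBertModel": ".modeling_ibert"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        # Import the submodule only when the attribute is actually requested.
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")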
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    '''simple docstring'''
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    '''simple docstring'''
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 226 | 1 |
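# The kernel above uses the continuous normalizer 1/(2*pi*sigma), so its
# entries do not sum to exactly 1 and the filtered image is slightly rescaled.
# A sketch of discrete renormalization, which most image libraries apply; this
# is a note on the design, not part of the original file.
kernel = gen_gaussian_kernel(5, sigma=0.8)
kernel = kernel / kernel.sum()  # now a proper weighted average: brightness preserved
print(kernel.sum())  # 1.0 (up to floating-point error)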
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 355 |
'''simple docstring'''
import random
class Onepad:
"""simple docstring"""
@staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
return cipher, key
@staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 287 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v: int, c: int):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
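
# A triangle (odd cycle) is the classic non-bipartite case: any 2-coloring
# forces two adjacent vertices into the same color class, so the check fails.
assert check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False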
| 237 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ) -> None:
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , ) -> Any:
"""simple docstring"""
_lowerCamelCase : str =True
_lowerCamelCase : str =LlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCamelCase : int =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
_lowerCamelCase : str =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
_lowerCamelCase : List[Any] =model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Any , ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : int =LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCamelCase : int =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self : int , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] =True
_lowerCamelCase : Tuple =True
_lowerCamelCase : str =LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
_lowerCamelCase : int =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
_lowerCamelCase : Any =outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowerCamelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_lowerCamelCase : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase : Optional[Any] =torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase : Optional[int] =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
_lowerCamelCase : Tuple =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
_lowerCamelCase : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase : Optional[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : Optional[int] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37 )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str =3
_lowerCamelCase : str =input_dict['input_ids']
_lowerCamelCase : int =input_ids.ne(1 ).to(lowercase_ )
_lowerCamelCase : Optional[Any] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] =LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCamelCase : Dict =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] =3
_lowerCamelCase : List[Any] ='single_label_classification'
_lowerCamelCase : List[str] =input_dict['input_ids']
_lowerCamelCase : Any =input_ids.ne(1 ).to(lowercase_ )
_lowerCamelCase : List[str] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : str =LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCamelCase : Optional[int] =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : int =3
_lowerCamelCase : Optional[Any] ='multi_label_classification'
_lowerCamelCase : str =input_dict['input_ids']
_lowerCamelCase : Tuple =input_ids.ne(1 ).to(lowercase_ )
_lowerCamelCase : Tuple =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase : Optional[Any] =LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCamelCase : Optional[Any] =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCamelCase ( self : Optional[int] , lowercase_ : List[Any] ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Union[str, Any] =ids_tensor([1, 10] , config.vocab_size )
_lowerCamelCase : List[str] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase : Optional[int] =LlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
_lowerCamelCase : List[Any] =original_model(lowercase_ ).last_hidden_state
_lowerCamelCase : Dict =original_model(lowercase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase : Dict ={'type': scaling_type, 'factor': 10.0}
_lowerCamelCase : int =LlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
_lowerCamelCase : Optional[int] =scaled_model(lowercase_ ).last_hidden_state
_lowerCamelCase : Dict =scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
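        # Illustration of the property tested above (hedged: ``rope_scaling``
        # is the config field the dict built above is assigned to):
        #     config.rope_scaling = {"type": "dynamic", "factor": 10.0}
        # leaves prompts shorter than config.max_position_embeddings untouched,
        # so their hidden states match the unscaled model; only longer inputs
        # trigger re-scaled RoPE frequencies, while "linear" scaling always applies.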
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_lowerCamelCase : int =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowerCamelCase : int =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Dict =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
_lowerCamelCase : Tuple =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_lowerCamelCase : List[Any] =model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_lowerCamelCase : str =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : List[str] =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : str =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_lowerCamelCase : int =model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_lowerCamelCase : str =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Union[str, Any] =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Any =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_lowerCamelCase : Optional[Any] =model(torch.tensor(lowercase_ ) )
_lowerCamelCase : Optional[int] =torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
_lowerCamelCase : int =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Tuple ='Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_lowerCamelCase : Union[str, Any] ='Simply put, the theory of relativity states that '
_lowerCamelCase : int =LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_lowerCamelCase : str =tokenizer.encode(lowercase_ , return_tensors='pt' )
_lowerCamelCase : List[Any] =LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ )
# greedy generation outputs
_lowerCamelCase : str =model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
_lowerCamelCase : Tuple =tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 199 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """simple docstring"""
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
with open(_SCREAMING_SNAKE_CASE ,encoding="utf-8" ) as vocab_handle:
_snake_case = json.load(_SCREAMING_SNAKE_CASE )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(_SCREAMING_SNAKE_CASE ,encoding="utf-8" ) as merges_handle:
_snake_case = merges_handle.read().split("\n" )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self ) -> int:
        return len(self.encoder )
    def get_vocab(self ):
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe(self, token: str ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize(self, text: str ) -> list:
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id(self, token: str ):
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self, index: int ):
        return self.decoder.get(index )
    def convert_tokens_to_string(self, tokens: list ) -> str:
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
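
# A minimal sketch of the merge loop implemented by ``bpe`` above, run on a
# hypothetical two-entry merge table (illustration only; real ranks come from
# merges.txt):
if __name__ == "__main__":
    toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
    word = ("l", "o", "w")
    while True:
        candidate_pairs = [p for p in get_pairs(word) if p in toy_ranks]
        if not candidate_pairs:
            break
        first, second = min(candidate_pairs, key=toy_ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(word[i] + word[i + 1])
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    assert word == ("low",)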
| 368 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """simple docstring"""
    if not isinstance(precision, int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1, num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(F'The first {n} digits of pi is: {pi(n)}')
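    # A quick sanity check (hedged: only compares digits that fit in a float):
    # the first 10 digits from the Chudnovsky series must prefix math.pi.
    import math
    assert str(math.pi).startswith(pi(10))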
| 142 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self, tokenizer):
'''simple docstring'''
lowercase__ = """UNwant\u00E9d,running"""
lowercase__ = """unwanted, running"""
return input_text, output_text
    def test_full_tokenizer(self):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
| 164 |
"""simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None ):
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = () ):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : DDPMSchedulerState , snake_case__ : Union[str, Any] , snake_case__ : List[Any]=None , snake_case__ : Any=None ):
lowerCamelCase_ : List[str] =state.common.alphas_cumprod[t]
lowerCamelCase_ : Union[str, Any] =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase_ : Tuple =(1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase_ : List[Any] =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase_ : List[str] =jnp.clip(snake_case__ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase_ : Dict =jnp.log(jnp.clip(snake_case__ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase_ : Optional[Any] =state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase_ : Any =jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase_ : List[str] =variance
lowerCamelCase_ : Optional[int] =state.common.betas[t]
lowerCamelCase_ : Dict =(predicted_variance + 1) / 2
lowerCamelCase_ : Dict =frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase__ ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
lowerCamelCase_ : Union[str, Any] =timestep
if key is None:
lowerCamelCase_ : Dict =jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase_ : Optional[Any] =jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCamelCase_ : List[str] =None
# 1. compute alphas, betas
lowerCamelCase_ : Union[str, Any] =state.common.alphas_cumprod[t]
lowerCamelCase_ : Dict =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase_ : Any =1 - alpha_prod_t
lowerCamelCase_ : List[str] =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase_ : int =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase_ : List[Any] =model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_ : Tuple =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase_ : List[Any] =jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_ : int =(alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase_ : Optional[Any] =state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_ : Any =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase_ : Union[str, Any] =jax.random.split(snake_case__ , num=1 )
lowerCamelCase_ : List[Any] =jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCamelCase_ : Tuple =jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase_ : str =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def UpperCAmelCase__ ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
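
# A minimal end-to-end sketch of driving this scheduler (hedged: ``step`` is
# the upstream diffusers name for the denoising method above, and the zero
# "model output" stands in for a real UNet noise prediction):
#
#     import jax
#     from diffusers import FlaxDDPMScheduler
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 32, 32))
#     for t in state.timesteps:
#         model_output = sample * 0.0
#         sample, state = scheduler.step(
#             state, model_output, int(t), sample,
#             key=jax.random.PRNGKey(int(t)), return_dict=False,
#         )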
| 365 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A__ : str = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs ):
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 209 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
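
# For illustration, how the 'init' pattern above rewrites a version line
# (hedged: the sample string below is made up for demonstration):
#
#     pattern, template = REPLACE_PATTERNS['init']
#     pattern.sub(template.replace('VERSION', '0.20.0'), '__version__ = "0.19.0.dev0"')
#     # -> '__version__ = "0.20.0"\n' (the template carries its own newline)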
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples(version: str) -> None:
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update(version: str, patch: bool = False) -> None:
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list() -> None:
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch: bool = False) -> None:
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
def post_release_work() -> None:
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 288 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values, labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def lowercase ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : List[Any] ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def lowercase ( self : Any ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def lowercase ( self : List[str] ):
pass
def lowercase ( self : int ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
_snake_case = model_class(_lowerCamelCase )
_snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def lowercase ( self : List[str] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFResNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _UpperCAmelCase ( ) -> Union[str, Any]:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase ( self : Dict ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
_snake_case = model(**_lowerCamelCase )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
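        # (hedged sketch) The assertions above mirror ordinary inference with
        # the same checkpoint; assuming the standard "microsoft/resnet-50" id:
        #     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
        #     model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
        #     logits = model(**processor(images=prepare_img(), return_tensors="tf")).logits
        #     predicted_class = int(tf.math.argmax(logits, axis=-1)[0])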
| 288 | 1 |
from math import log2
def get_index_of_rightmost_set_bit(number: int) -> int:
    if not isinstance(number , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    if number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return 0 if (number == 0) else int(log2(number & -number ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int , name: str , config: LevitConfig , save_directory: Path , push_to_hub: bool = True ) -> None:
    print(f"Converting {name}..." )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config ).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys() )
    new_keys = list(our_model.state_dict().keys() )
    print(len(og_keys ) , len(new_keys ) )
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    x = torch.randn((2, 3, 224, 224) )
    from_model_logits = from_model(x )
    our_model_logits = our_model(x ).logits
    assert torch.allclose(from_model_logits , our_model_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"Pushed {checkpoint_name}" )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
UpperCamelCase__ : Optional[Any] = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
UpperCamelCase__ : Optional[Any] = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 247 | 0 |
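# Editor's note: a minimal, runnable sketch of the positional state-dict copy
# used in the conversion above (toy torch modules, not the actual LeViT
# weights):
from collections import OrderedDict
import torch
src = torch.nn.Sequential(torch.nn.Linear(4, 4))
dst = torch.nn.Sequential(torch.nn.Linear(4, 4))
src_keys = list(src.state_dict().keys())
dst_keys = list(dst.state_dict().keys())
# Map the i-th source tensor onto the i-th destination key, exactly like the
# timm -> transformers loop in convert_weight_and_push; this only works when
# both models enumerate their parameters in the same order.
mapped = OrderedDict((dst_keys[i], src.state_dict()[src_keys[i]]) for i in range(len(src_keys)))
dst.load_state_dict(mapped)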
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__(self) -> str:
        return str(self.connections)
    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 270 |
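# Editor's note: usage sketch for the Prim implementation above. Build a small
# weighted graph and read the spanning-tree parents back.
graph = GraphUndirectedWeighted[str]()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("a", "c", 15)
dist, parent = prims_algo(graph)
# "a" is pushed first and extracted as the root; the cheap a-b and b-c edges
# win over the direct a-c edge of weight 15.
assert parent["b"] == "a" and parent["c"] == "b"
assert dist == {"a": 0, "b": 3, "c": 13}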
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
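# Editor's note: with the sample graphs above, the forward search from "E" and
# the backward search from "F" settle on the route E -> G -> F (cost 2 + 1):
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3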
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""ViTFeatureExtractor"""]
_A = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
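# Editor's note: the _LazyModule above defers each submodule import until an
# attribute is first touched. A simplified stand-in for the idea (a sketch,
# not the transformers implementation):
import importlib
class LazyNamespace:
    def __init__(self, import_structure):
        # invert {module: [names]} into {name: module}
        self._name_to_module = {name: mod for mod, names in import_structure.items() for name in names}
    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)
ns = LazyNamespace({"json": ["JSONDecoder"]})
decoder = ns.JSONDecoder  # the json module is imported only at this point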
"""simple docstring"""
def multiplication_table( number , number_of_terms ) -> str:
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 212 | 0 |
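# Editor's note: a quick check of the expected output of the function above:
assert multiplication_table(number=5, number_of_terms=2) == "5 * 1 = 5\n5 * 2 = 10"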
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCAmelCase_ ( __lowercase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(__lowercase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A_ :
def lowercase ( self : str , snake_case_ : int , snake_case_ : int ):
pass
def lowercase ( self : Optional[Any] ):
pass
def lowercase ( self : int ):
pass
def lowercase ( self : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Any , snake_case_ : int , snake_case_ : List[str]=None , **snake_case_ : int ):
_UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase ( self : List[str] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple=None , **snake_case_ : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Union[str, Any]=None , **snake_case_ : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = {"vision_model": vision_model, "text_model": text_model}
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1e-5 )
def lowercase ( self : int , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int=None , **snake_case_ : str ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f'Difference between torch and flax is {diff} (>= {tol}).' )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case_ )
@slow
def lowercase ( self : List[str] ):
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_tf
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowercase ( self : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
_UpperCAmelCase = TFViTModel(snake_case_ , name="vision_model" )
_UpperCAmelCase = TFBertModel(snake_case_ , name="text_model" )
return vision_model, text_model
def lowercase ( self : int ):
_UpperCAmelCase = TFViTModelTester(self )
_UpperCAmelCase = TFBertModelTester(self )
_UpperCAmelCase = vit_model_tester.prepare_config_and_inputs()
_UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = vision_config_and_inputs
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
def lowercase ( self : List[Any] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowercase ( self : int , snake_case_ : int , snake_case_ : Dict , snake_case_ : str , snake_case_ : Tuple , snake_case_ : str=None , **snake_case_ : Tuple ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase ( self : List[Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
_UpperCAmelCase = TFDeiTModel(snake_case_ , name="vision_model" )
_UpperCAmelCase = TFRobertaModel(snake_case_ , name="text_model" )
return vision_model, text_model
def lowercase ( self : Any ):
_UpperCAmelCase = TFDeiTModelTester(self )
_UpperCAmelCase = TFRobertaModelTester(self )
_UpperCAmelCase = vit_model_tester.prepare_config_and_inputs()
_UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = vision_config_and_inputs
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowercase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ):
_UpperCAmelCase = TFCLIPVisionModel(snake_case_ , name="vision_model" )
_UpperCAmelCase = TFBertModel(snake_case_ , name="text_model" )
return vision_model, text_model
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = TFCLIPVisionModelTester(self )
_UpperCAmelCase = TFBertModelTester(self )
_UpperCAmelCase = clip_model_tester.prepare_config_and_inputs()
_UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase = vision_config_and_inputs
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A_ ( unittest.TestCase ):
@slow
def lowercase ( self : int ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=snake_case_ )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=snake_case_ , padding=snake_case_ , return_tensors="np" )
_UpperCAmelCase = model(**snake_case_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCAmelCase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1e-3 ) )
| 22 |
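# Editor's note: the seq_len arithmetic asserted in the attention tests above,
# worked through for common ViT defaults (224x224 images, 16x16 patches; the
# concrete numbers are illustrative assumptions):
image_size, patch_size = (224, 224), (16, 16)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
assert num_patches == 196
assert num_patches + 1 == 197  # ViT prepends a [CLS] token
assert num_patches + 2 == 198  # DeiT prepends [CLS] plus a distillation token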
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
class A_ :
    name : str
    pip_package : str = None
@staticmethod
def lowercase ( ):
raise NotImplementedError
def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ):
raise NotImplementedError
def lowercase ( self : Any , snake_case_ : int ):
raise NotImplementedError
def lowercase ( self : List[str] ):
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def lowercase ( cls : List[Any] ):
return f'`pip install {cls.pip_package or cls.name}`'
class A_ ( lowerCAmelCase_ ):
    name = """optuna"""
@staticmethod
def lowercase ( ):
return is_optuna_available()
def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ):
return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : int , snake_case_ : Optional[int] ):
return default_hp_space_optuna(snake_case_ )
class A_ ( lowerCAmelCase_ ):
    name = """ray"""
    pip_package = """'ray[tune]'"""
@staticmethod
def lowercase ( ):
return is_ray_available()
def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ):
return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : str ):
return default_hp_space_ray(snake_case_ )
class A_ ( lowerCAmelCase_ ):
    name = """sigopt"""
@staticmethod
def lowercase ( ):
return is_sigopt_available()
def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ):
return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Dict , snake_case_ : Optional[Any] ):
return default_hp_space_sigopt(snake_case_ )
class A_ ( lowerCAmelCase_ ):
    name = """wandb"""
@staticmethod
def lowercase ( ):
return is_wandb_available()
def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ):
return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : Union[str, Any] ):
return default_hp_space_wandb(snake_case_ )
__SCREAMING_SNAKE_CASE :Dict = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase_ ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__lowercase ) > 0:
_UpperCAmelCase = available_backends[0].name
if len(__lowercase ) > 1:
logger.info(
f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 22 | 1 |
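# Editor's note: a self-contained sketch of the registry pattern above, where
# subclasses declare a name and an availability probe and the first available
# backend becomes the default (toy classes, not the transformers backends):
class Backend:
    name: str
    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError
class AlwaysOn(Backend):
    name = "always-on"
    @staticmethod
    def is_available() -> bool:
        return True
BACKENDS = {b.name: b for b in [AlwaysOn]}
available = [b for b in BACKENDS.values() if b.is_available()]
assert available[0].name == "always-on"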
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range ,)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config , input_ids , token_type_ids , attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config , input_ids , token_type_ids , attention_mask = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        self.model_tester = FlaxBertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        model = FlaxBertModel.from_pretrained('''bert-base-cased''' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 330 |
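# Editor's note: the slow test above, run eagerly. It downloads the checkpoint,
# so it assumes network access and installed flax/transformers:
import numpy as np
from transformers import FlaxBertModel
model = FlaxBertModel.from_pretrained("bert-base-cased")
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)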
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = 'yolos'
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[5_12, 8_64] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=1_00 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        return 12
| 330 | 1 |
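# Editor's note: assuming the class above mirrors transformers' YolosConfig,
# its defaults can be inspected like any PretrainedConfig (values shown are
# the defaults from the signature above):
from transformers import YolosConfig
config = YolosConfig()
print(config.image_size, config.num_detection_tokens)  # [512, 864] 100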
import math
def is_prime( number: int ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( nth: int = 10001 ) ->int:
    """simple docstring"""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 |
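# Editor's note: quick checks of solution() above, which collects primes by
# trial division until the list holds nth entries.
assert solution(6) == 13  # 2, 3, 5, 7, 11, 13
# solution() with the default nth=10001 returns 104743 (Project Euler 7).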
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 | 1 |
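# Editor's note: behaviour sketch of compute_effective_axis_dimension as used
# above (assuming the transformers.onnx.utils semantics: dynamic -1 axes fall
# back to a fixed size, minus room reserved for special tokens). The local
# name is hypothetical to avoid shadowing the real import:
def _effective_axis_dim(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # dynamic axis
        dimension = fixed_dimension
    return dimension - num_token_to_add
assert _effective_axis_dim(-1, fixed_dimension=8, num_token_to_add=2) == 6
assert _effective_axis_dim(16, fixed_dimension=8) == 16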
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A( datasets.BuilderConfig ):
snake_case_ = 1_0_0_0_0
snake_case_ = None
snake_case_ = None
class __A( datasets.ArrowBasedBuilder ):
snake_case_ = ParquetConfig
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__a = data_files
if isinstance(a_ , a_ ):
__a = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__a = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__a = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__a = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__a = [dl_manager.iter_files(a_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__a = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__a = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__a = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__a = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
                    raise
| 352 |
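# Editor's note: standalone sketch of the batched read at the core of the
# table generator above; it writes a throwaway parquet file first so the
# snippet is runnable (file name is illustrative):
import pyarrow as pa
import pyarrow.parquet as pq
pq.write_table(pa.table({"x": list(range(25))}), "tiny.parquet")
parquet_file = pq.ParquetFile("tiny.parquet")
for record_batch in parquet_file.iter_batches(batch_size=10, columns=["x"]):
    print(pa.Table.from_batches([record_batch]).num_rows)  # 10, 10, 5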
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
A : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Dict:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = '''weight'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> int:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> Tuple:
if config_path is not None:
__a = UniSpeechSatConfig.from_pretrained(a__ )
else:
__a = UniSpeechSatConfig()
__a = ''''''
if is_finetuned:
__a = UniSpeechSatForCTC(a__ )
else:
__a = UniSpeechSatForPreTraining(a__ )
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__a = model[0].eval()
recursively_load_weights(a__ , a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
 )
| 33 | 0 |
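# A minimal, self-contained sketch of the key-remapping pattern the loader
# above implements: walk a source state dict, rewrite matching keys through a
# prefix map, and collect everything that matched nothing. The mapping and
# tensor values below are illustrative stand-ins, not the real MAPPING table.
def remap_state_dict(source, mapping):
    remapped, unused = {}, []
    for name, value in source.items():
        for key, mapped_key in mapping.items():
            if key in name:
                remapped[name.replace(key, mapped_key)] = value
                break
        else:
            unused.append(name)
    return remapped, unused

demo_remapped, demo_unused = remap_state_dict(
    {"encoder.layers.0.weight": 1.0, "quantizer.vars": 2.0},
    {"encoder.layers": "unispeech_sat.encoder.layers"},
)
assert "unispeech_sat.encoder.layers.0.weight" in demo_remapped
assert demo_unused == ["quantizer.vars"]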
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase : Dict = 'Muhammad Umer Farooq'
UpperCAmelCase : Any = 'MIT'
UpperCAmelCase : Tuple = '1.0.0'
UpperCAmelCase : Optional[Any] = 'Muhammad Umer Farooq'
UpperCAmelCase : Union[str, Any] = 'contact@muhammadumerfarooq.me'
UpperCAmelCase : Dict = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = domain
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : list[tuple[str, str | None]] ) -> None:
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined, and is not empty or a bare "#" anchor, record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__SCREAMING_SNAKE_CASE = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def a__ ( a__ ):
"""simple docstring"""
return ".".join(get_sub_domain_name(a__ ).split(""".""" )[-2:] )
def a__ ( a__ ):
"""simple docstring"""
return parse.urlparse(a__ ).netloc
def a__ ( a__ = "https://github.com" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_domain_name(a__ )
# Initialize the parser
__SCREAMING_SNAKE_CASE = Parser(a__ )
try:
# Open URL
__SCREAMING_SNAKE_CASE = requests.get(a__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__SCREAMING_SNAKE_CASE = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__SCREAMING_SNAKE_CASE = requests.get(a__ )
# Get the valid email.
__SCREAMING_SNAKE_CASE = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(a__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(a__ )
if __name__ == "__main__":
UpperCAmelCase : int = emails_from_url('https://github.com')
print(f"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
| 267 |
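# Self-contained check of the email-matching regex used by the crawler above,
# run against a made-up page body so no network access is needed.
import re

demo_domain = "example.com"
demo_page = "reach us at team@example.com or admin@example.com, thanks"
demo_emails = re.findall("[a-zA-Z0-9]+@" + demo_domain, demo_page)
assert demo_emails == ["team@example.com", "admin@example.com"]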
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def a__ ( a__ , a__ , a__ , a__ , a__=False , a__=True ):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
__SCREAMING_SNAKE_CASE = config_class.from_json_file(a__ )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
print(F'Building TensorFlow model from configuration: {config}' )
__SCREAMING_SNAKE_CASE = model_class(a__ )
    # Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__SCREAMING_SNAKE_CASE = cached_file(
a__ , a__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__SCREAMING_SNAKE_CASE = load_pytorch_checkpoint_in_tfa_model(a__ , a__ )
if compare_with_pt_model:
__SCREAMING_SNAKE_CASE = tf_model(tf_model.dummy_inputs , training=a__ ) # build the network
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(
pretrained_model_name_or_path=a__ , config=a__ , state_dict=a__ )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model(**pt_model.dummy_inputs )
__SCREAMING_SNAKE_CASE = pto[0].numpy()
__SCREAMING_SNAKE_CASE = tfo[0].numpy()
__SCREAMING_SNAKE_CASE = np.amax(np.abs(np_pt - np_tf ) )
print(F'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}'
    # Save the TensorFlow model
print(F'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(a__ , save_format="""h5""" )
def a__ ( a__ , a__ , a__=None , a__=None , a__=False , a__=False , a__=False , a__=False , ):
"""simple docstring"""
if args_model_type is None:
__SCREAMING_SNAKE_CASE = list(MODEL_CLASSES.keys() )
else:
__SCREAMING_SNAKE_CASE = [args_model_type]
for j, model_type in enumerate(a__ , start=1 ):
print("""=""" * 1_00 )
print(F' Converting model type {j}/{len(a__ )}: {model_type}' )
print("""=""" * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(a__ , a__ ) , start=1 ):
print("""-""" * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
__SCREAMING_SNAKE_CASE = model_shortcut_name
elif only_convert_finetuned_models:
print(F' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
F' Converting checkpoint {i}/{len(a__ )}: {model_shortcut_name} - model_type {model_type}' )
print("""-""" * 1_00 )
if config_shortcut_name in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = model_shortcut_name
if os.path.isfile(a__ ):
__SCREAMING_SNAKE_CASE = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=a__ , pytorch_checkpoint_path=a__ , config_file=a__ , tf_dump_path=os.path.join(a__ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=a__ , )
if remove_cached_files:
os.remove(a__ )
os.remove(a__ )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 267 | 1 |
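# Minimal numeric sketch of the cross-framework check performed above: take
# the max absolute difference between two output arrays and enforce the same
# 2e-2 tolerance. The arrays are stand-ins for real PyTorch/TensorFlow logits.
import numpy as np

demo_np_pt = np.array([0.10, 0.20, 0.30])
demo_np_tf = np.array([0.10, 0.21, 0.30])
demo_diff = np.amax(np.abs(demo_np_pt - demo_np_tf))
assert demo_diff <= 2e-2, f"Error, model absolute difference is >2e-2: {demo_diff}"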
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_SCREAMING_SNAKE_CASE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=16 , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=14 , lowerCAmelCase_=10 , lowerCAmelCase_=19 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=True , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=[1, 2, 3, 4, 5] , lowerCAmelCase_=25 , lowerCAmelCase_=5 , ) -> Tuple:
_A = d_model
_A = parent
_A = batch_size
_A = prediction_length
_A = context_length
_A = cardinality
_A = num_time_features
_A = lags_sequence
_A = embedding_dimension
_A = is_training
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = context_length
_A = prediction_length + label_length
_A = label_length
_A = moving_average
_A = autocorrelation_factor
def UpperCAmelCase ( self ) -> int:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
_A = config.context_length + max(config.lags_sequence )
_A = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_A = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_A = floats_tensor([self.batch_size, _past_length] )
_A = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_A = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_A = floats_tensor([self.batch_size, config.prediction_length] )
_A = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_config()
_A = self.prepare_autoformer_inputs_dict(__A )
return config, inputs_dict
def UpperCAmelCase ( self ) -> Optional[int]:
_A , _A = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = AutoformerModel(config=__A ).to(__A ).eval()
_A = model(**__A )
_A = outputs.encoder_last_hidden_state
_A = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_A = model.get_encoder()
encoder.save_pretrained(__A )
_A = AutoformerEncoder.from_pretrained(__A ).to(__A )
_A , _A , _A , _A , _A = model.create_network_inputs(**__A )
_A , _A = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_A = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_A = encoder(inputs_embeds=__A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_A = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_A = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_A = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_A = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = model.get_decoder()
decoder.save_pretrained(__A )
_A = AutoformerDecoder.from_pretrained(__A ).to(__A )
_A = decoder(
trend=__A , inputs_embeds=__A , encoder_hidden_states=__A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCamelCase :str = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCamelCase :int = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
lowerCamelCase :Any = False
lowerCamelCase :int = False
lowerCamelCase :int = False
lowerCamelCase :int = False
lowerCamelCase :str = False
lowerCamelCase :Tuple = False
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = AutoformerModelTester(self )
_A = ConfigTester(self , config_class=__A , has_text_modality=__A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Tuple:
_A , _A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_A = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
_A , _A = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["""missing_keys"""] , [] )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
    @unittest.skip(reason="""Model has no token embeddings""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = inspect.signature(getattr(__A , """forward""" ) )
# The main input is the name of the argument after `self`
_A = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __A )
def UpperCAmelCase ( self ) -> str:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__A )] , __A )
def UpperCAmelCase ( self ) -> List[str]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
_A = getattr(self.model_tester , """seq_length""" , __A )
_A = getattr(self.model_tester , """decoder_seq_length""" , __A )
_A = getattr(self.model_tester , """encoder_seq_length""" , __A )
_A = getattr(self.model_tester , """d_model""" , __A )
_A = getattr(self.model_tester , """num_attention_heads""" , __A )
_A = d_model // num_attention_heads
for model_class in self.all_model_classes:
_A = True
_A = False
_A = True
_A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(__A , __A ) )
_A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A = True
_A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(__A , __A ) )
_A = outputs.encoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_A = len(__A )
_A = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__A , __A )
# decoder attentions
_A = outputs.decoder_attentions
self.assertIsInstance(__A , (list, tuple) )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_A = outputs.cross_attentions
self.assertIsInstance(__A , (list, tuple) )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_A = True
_A = True
_A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 2 , len(__A ) )
_A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase ( self ) -> List[Any]:
super().test_retain_grad_hidden_states_attentions()
def snake_case ( snake_case__ :int="train-batch.pt") -> str:
_A = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowercase , repo_type="""dataset""")
_A = torch.load(_lowercase , map_location=_lowercase)
return batch
@require_torch
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
_A = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__A )
_A = prepare_batch()
with torch.no_grad():
_A = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_A = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __A )
_A = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__A )
self.assertTrue(torch.allclose(output[0, :3, :3] , __A , atol=__A ) )
def UpperCAmelCase ( self ) -> str:
_A = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__A )
_A = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_A = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_A = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __A )
_A = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__A )
self.assertTrue(torch.allclose(output[0, :3, :3] , __A , atol=__A ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__A )
_A = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_A = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_A = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __A )
_A = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=__A )
_A = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __A , rtol=1E-1 ) )
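# Hedged sketch of the tolerance pattern the slow integration tests above rely
# on: compare an output slice against hard-coded expected values within an
# absolute tolerance. Both tensors below are made up for illustration; torch
# is already a dependency of the tests above.
import torch

demo_output = torch.tensor([[0.3593, -1.3398, 0.6330]])
demo_expected = torch.tensor([[0.3594, -1.3397, 0.6331]])
assert torch.allclose(demo_output, demo_expected, atol=1e-3)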
| 359 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ = "cpu" , lowerCAmelCase_ = "openai/clip-vit-large-patch14" ) -> None:
_A = device
_A = CLIPTokenizerFast.from_pretrained(lowerCAmelCase_ )
_A = [0.4814_5466, 0.457_8275, 0.4082_1073]
_A = [0.2686_2954, 0.2613_0258, 0.2757_7711]
_A = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_A = torchvision.transforms.Resize(2_24 )
_A = torchvision.transforms.CenterCrop(2_24 )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = self.resize(lowerCAmelCase_ )
_A = self.center_crop(lowerCAmelCase_ )
_A = self.normalize(lowerCAmelCase_ )
return images
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A = self.tokenizer(text=lowerCAmelCase_ , **lowerCAmelCase_ )
_A = self.preprocess_img(lowerCAmelCase_ )
_A = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=10 , lowerCAmelCase_=0.01 , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="image" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> None:
super().__init__()
_A = None
_A = device if device else get_device()
if vqgan:
_A = vqgan
else:
_A = load_vqgan(self.device , conf_path=lowerCAmelCase_ , ckpt_path=lowerCAmelCase_ )
self.vqgan.eval()
if clip:
_A = clip
else:
_A = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_A = ProcessorGradientFlow(device=self.device )
_A = iterations
_A = lr
_A = log
_A = make_grid
_A = return_val
_A = quantize
_A = self.vqgan.decoder.z_shape
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5 , lowerCAmelCase_=True ) -> Any:
_A = []
if output_path is None:
_A = """./animation.gif"""
if input_path is None:
_A = self.save_path
_A = sorted(glob(input_path + """/*""" ) )
if not len(lowerCAmelCase_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowerCAmelCase_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_A = total_duration / len(lowerCAmelCase_ )
_A = [frame_duration] * len(lowerCAmelCase_ )
if extend_frames:
_A = 1.5
_A = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowerCAmelCase_ ) )
imageio.mimsave(lowerCAmelCase_ , lowerCAmelCase_ , duration=lowerCAmelCase_ )
print(F'''gif saved to {output_path}''' )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> str:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_A = preprocess(Image.open(lowerCAmelCase_ ) , target_image_size=2_56 ).to(self.device )
_A = preprocess_vqgan(lowerCAmelCase_ )
_A , *_A = self.vqgan.encode(lowerCAmelCase_ )
return z
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_A = self.latent.detach().requires_grad_()
_A = base_latent + transform_vector
if self.quantize:
_A , *_A = self.vqgan.quantize(lowerCAmelCase_ )
else:
_A = trans_latent
return self.vqgan.decode(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_A = self.clip_preprocessor(text=lowerCAmelCase_ , images=lowerCAmelCase_ , return_tensors="""pt""" , padding=lowerCAmelCase_ )
_A = self.clip(**lowerCAmelCase_ )
_A = clip_outputs.logits_per_image
if weights is not None:
_A = similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_A = self._get_clip_similarity(pos_prompts["""prompts"""] , lowerCAmelCase_ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_A = self._get_clip_similarity(neg_prompts["""prompts"""] , lowerCAmelCase_ , weights=neg_prompts["""weights"""] )
else:
_A = torch.tensor([1] , device=self.device )
_A = -torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ )
return loss
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = torch.randn_like(self.latent , requires_grad=lowerCAmelCase_ , device=self.device )
_A = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_A = self._add_vector(lowerCAmelCase_ )
_A = loop_post_process(lowerCAmelCase_ )
_A = self._get_CLIP_loss(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
print("""CLIP loss""" , lowerCAmelCase_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
wandb.init(reinit=lowerCAmelCase_ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_A = Image.open(lowerCAmelCase_ )
_A = image.resize((2_56, 2_56) )
wandb.log("""Original Image""" , wandb.Image(lowerCAmelCase_ ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
if not prompts:
return []
_A = []
_A = []
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowerCAmelCase_ , (tuple, list) ):
_A = prompt[0]
_A = float(prompt[1] )
elif ":" in prompt:
_A , _A = prompt.split(""":""" )
_A = float(lowerCAmelCase_ )
else:
_A = prompt
_A = 1.0
processed_prompts.append(lowerCAmelCase_ )
weights.append(lowerCAmelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase_ , device=self.device ),
}
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , ) -> str:
if image_path:
_A = self._get_latent(lowerCAmelCase_ )
else:
_A = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
_A = self.process_prompts(lowerCAmelCase_ )
_A = self.process_prompts(lowerCAmelCase_ )
if save_final and save_path is None:
_A = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
else:
_A = save_path + """_""" + get_timestamp()
os.makedirs(lowerCAmelCase_ )
_A = save_path
_A = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowerCAmelCase_ ) )
_A = loop_post_process(lowerCAmelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ):
if show_intermediate:
show_pil(lowerCAmelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowerCAmelCase_ )} )
if show_final:
show_pil(lowerCAmelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 81 | 0 |
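# Self-contained sketch of the "prompt:weight" convention that process_prompts
# above implements; no VQGAN or CLIP weights are needed to exercise it.
def parse_prompt(prompt):
    if ":" in prompt:
        text, weight = prompt.split(":")
        return text, float(weight)
    return prompt, 1.0

assert parse_prompt("a smiling face:2.0") == ("a smiling face", 2.0)
assert parse_prompt("blue eyes") == ("blue eyes", 1.0)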
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowercase : List[str] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def A_ ( ) -> Optional[Any]:
a__ : Union[str, Any] = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
a__ : List[str] = get_sagemaker_input()
else:
a__ : Union[str, Any] = get_cluster_input()
return config
def A_ ( A__=None ) -> Optional[Any]:
if subparsers is not None:
a__ : Tuple = subparsers.add_parser('config' , description=A__ )
else:
a__ : Optional[Any] = argparse.ArgumentParser('Accelerate config command' , description=A__ )
parser.add_argument(
'--config_file' , default=A__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def A_ ( A__ ) -> Union[str, Any]:
a__ : List[Any] = get_user_input()
if args.config_file is not None:
a__ : Any = args.config_file
else:
if not os.path.isdir(A__ ):
os.makedirs(A__ )
a__ : Optional[int] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(A__ )
else:
config.to_yaml_file(A__ )
print(F'accelerate configuration saved at {config_file}' )
def A_ ( ) -> str:
a__ : str = config_command_parser()
a__ : Optional[Any] = parser.parse_args()
config_command(A__ )
if __name__ == "__main__":
main()
| 99 |
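# Quick self-contained illustration of the parser wiring above: the standalone
# branch builds its own ArgumentParser and reads --config_file. The yaml path
# below is a placeholder, not a real default.
import argparse

demo_parser = argparse.ArgumentParser("Accelerate config command")
demo_parser.add_argument("--config_file", default=None)
demo_args = demo_parser.parse_args(["--config_file", "my_config.yaml"])
assert demo_args.config_file == "my_config.yaml"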
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Any = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowercase : List[str] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowercase : List[Any] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : int = '''whisper'''
__A : List[Any] = ['''past_key_values''']
__A : Optional[int] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowercase=5_1865 , lowercase=80 , lowercase=6 , lowercase=4 , lowercase=6 , lowercase=4 , lowercase=1536 , lowercase=1536 , lowercase=0.0 , lowercase=0.0 , lowercase=5_0257 , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=256 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=False , lowercase=1500 , lowercase=448 , lowercase=5_0256 , lowercase=5_0256 , lowercase=5_0256 , lowercase=None , lowercase=[220, 5_0256] , lowercase=False , lowercase=256 , lowercase=False , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase=7 , **lowercase , ) -> str:
'''simple docstring'''
a__ : int = vocab_size
a__ : int = num_mel_bins
a__ : Optional[int] = d_model
a__ : List[str] = encoder_layers
a__ : Dict = encoder_attention_heads
a__ : List[str] = decoder_layers
a__ : Tuple = decoder_attention_heads
a__ : List[str] = decoder_ffn_dim
a__ : Optional[Any] = encoder_ffn_dim
a__ : Tuple = dropout
a__ : Optional[int] = attention_dropout
a__ : Any = activation_dropout
a__ : Any = activation_function
a__ : List[Any] = init_std
a__ : Optional[int] = encoder_layerdrop
a__ : Union[str, Any] = decoder_layerdrop
a__ : Tuple = use_cache
a__ : List[str] = encoder_layers
a__ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
a__ : Dict = max_source_positions
a__ : Dict = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a__ : Optional[int] = classifier_proj_size
a__ : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : List[Any] = apply_spec_augment
a__ : int = mask_time_prob
a__ : int = mask_time_length
a__ : List[Any] = mask_time_min_masks
a__ : str = mask_feature_prob
a__ : Optional[int] = mask_feature_length
a__ : Union[str, Any] = mask_feature_min_masks
a__ : Tuple = median_filter_width
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , suppress_tokens=lowercase , begin_suppress_tokens=lowercase , **lowercase , )
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
@property
def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
a__ : List[str] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
])
if self.use_past:
a__ : Optional[Any] = {0: 'batch'}
else:
a__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction='inputs')
return common_inputs
def __lowercase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 2_2050 , lowercase = 5.0 , lowercase = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = OrderedDict()
a__ : int = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase , framework=lowercase , sampling_rate=lowercase , time_duration=lowercase , frequency=lowercase , )
a__ : List[Any] = encoder_inputs['input_features'].shape[2]
a__ : Optional[int] = encoder_sequence_length // 2 if self.use_past else seq_length
a__ : Any = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase , lowercase , lowercase , lowercase)
a__ : List[str] = encoder_inputs.pop('input_features')
a__ : Optional[int] = decoder_inputs.pop('decoder_input_ids')
if "past_key_values" in decoder_inputs:
a__ : List[str] = decoder_inputs.pop('past_key_values')
return dummy_inputs
@property
def __lowercase ( self) -> float:
'''simple docstring'''
return 1e-3
| 99 | 1 |
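# Small numeric illustration of the scale_embedding comment above: when the
# flag is set, token embeddings are scaled by sqrt(d_model). The value 256 is
# used here purely as an example dimension.
import math

def embedding_scale(d_model, scale_embedding):
    return math.sqrt(d_model) if scale_embedding else 1.0

assert embedding_scale(256, True) == 16.0
assert embedding_scale(256, False) == 1.0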
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def a_ ( __snake_case : np.ndarray , __snake_case : float ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =math.sqrt(__snake_case )
lowerCamelCase_ =1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def a_ ( __snake_case : np.ndarray , __snake_case : int , __snake_case : int , __snake_case : int ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def a_ ( __snake_case : int , __snake_case : float ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =np.zeros((kernel_size, kernel_size) )
for i in range(0 , __snake_case ):
for j in range(0 , __snake_case ):
lowerCamelCase_ =math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__snake_case , __snake_case )
def a_ ( __snake_case : np.ndarray , __snake_case : float , __snake_case : float , __snake_case : int , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =np.zeros(img.shape )
lowerCamelCase_ =get_gauss_kernel(__snake_case , __snake_case )
lowerCamelCase_, lowerCamelCase_ =img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowerCamelCase_ =get_slice(__snake_case , __snake_case , __snake_case , __snake_case )
lowerCamelCase_ =img_s - img_s[kernel_size // 2, kernel_size // 2]
lowerCamelCase_ =vec_gaussian(__snake_case , __snake_case )
lowerCamelCase_ =np.multiply(__snake_case , __snake_case )
lowerCamelCase_ =np.multiply(__snake_case , __snake_case )
lowerCamelCase_ =np.sum(__snake_case ) / np.sum(__snake_case )
lowerCamelCase_ =val
return imga
def a_ ( __snake_case : list ) -> tuple:
"""simple docstring"""
lowerCamelCase_ =args[1] if args[1:] else '''../image_data/lena.jpg'''
lowerCamelCase_ =float(args[2] ) if args[2:] else 1.0
lowerCamelCase_ =float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowerCamelCase_ =int(args[4] )
lowerCamelCase_ =kernel_size + abs(kernel_size % 2 - 1 )
else:
lowerCamelCase_ =5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a_ : Union[str, Any] = parse_args(sys.argv)
a_ : Dict = cva.imread(filename, 0)
cva.imshow("""input image""", img)
a_ : Tuple = img / 2_55
a_ : Optional[Any] = out.astype("""float32""")
a_ : List[str] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a_ : Union[str, Any] = out * 2_55
a_ : Union[str, Any] = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 358 |
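# Self-contained one-patch illustration of the bilateral weighting idea above:
# a spatial Gaussian multiplied by an intensity Gaussian, then normalized. The
# 3x3 patch and sigmas are made up; the 0.9 outlier pixel barely moves the
# filtered value because its intensity weight is tiny.
import math

import numpy as np

def demo_gaussian(x, sigma):
    return math.exp(-((x / sigma) ** 2) * 0.5) / (sigma * math.sqrt(2 * math.pi))

demo_patch = np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.9], [0.5, 0.5, 0.5]])
demo_center = demo_patch[1, 1]
demo_spatial = np.array(
    [[demo_gaussian(math.hypot(i - 1, j - 1), 1.0) for j in range(3)] for i in range(3)]
)
demo_intensity = np.vectorize(lambda v: demo_gaussian(v - demo_center, 0.1))(demo_patch)
demo_weights = demo_spatial * demo_intensity
demo_filtered = (demo_patch * demo_weights).sum() / demo_weights.sum()
assert abs(demo_filtered - 0.5) < 0.01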
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
| 6 | 0 |
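# Hedged usage note for the command above: once registered with the
# transformers CLI, it is typically invoked as below. The model name and cache
# directory are placeholders, and the flag names are taken from the parser above.
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force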
'''simple docstring'''
class A :
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ) -> Any:
'''simple docstring'''
lowercase__ = data
lowercase__ = previous
lowercase__ = next_node
def __str__( self ) -> str:
'''simple docstring'''
return F'''{self.data}'''
def A__ ( self ) -> int:
'''simple docstring'''
return self.data
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.next
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.previous
class A :
def __init__( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__ = head
def __iter__( self ) -> Optional[Any]:
'''simple docstring'''
return self
def A__ ( self ) -> Any:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
lowercase__ = self.current.get_data()
lowercase__ = self.current.get_next()
return value
class A :
def __init__( self ) -> List[str]:
'''simple docstring'''
lowercase__ = None # First node in list
lowercase__ = None # Last node in list
def __str__( self ) -> Any:
'''simple docstring'''
lowercase__ = self.head
lowercase__ = []
while current is not None:
nodes.append(current.get_data() )
lowercase__ = current.get_next()
return " ".join(str(lowerCamelCase__ ) for node in nodes )
def __contains__( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.head
while current:
if current.get_data() == value:
return True
lowercase__ = current.get_next()
return False
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
return LinkedListIterator(self.head )
def A__ ( self ) -> str:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def A__ ( self ) -> Dict:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def A__ ( self , lowerCamelCase__ ) -> None:
'''simple docstring'''
if self.head is None:
lowercase__ = node
lowercase__ = node
else:
self.insert_before_node(self.head , lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ ) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.insert_after_node(self.tail , lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ ) -> None:
'''simple docstring'''
lowercase__ = Node(lowerCamelCase__ )
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.set_tail(lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> None:
'''simple docstring'''
lowercase__ = node
lowercase__ = node.previous
if node.get_previous() is None:
lowercase__ = node_to_insert
else:
lowercase__ = node_to_insert
lowercase__ = node_to_insert
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> None:
'''simple docstring'''
lowercase__ = node
lowercase__ = node.next
if node.get_next() is None:
lowercase__ = node_to_insert
else:
lowercase__ = node_to_insert
lowercase__ = node_to_insert
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> None:
'''simple docstring'''
lowercase__ = 1
lowercase__ = Node(lowerCamelCase__ )
lowercase__ = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase__ , lowerCamelCase__ )
return
current_position += 1
lowercase__ = node.next
self.insert_after_node(self.tail , lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ ) -> Node:
'''simple docstring'''
lowercase__ = self.head
while node:
if node.get_data() == item:
return node
lowercase__ = node.get_next()
raise Exception("""Node not found""" )
def A__ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
if (node := self.get_node(lowerCamelCase__ )) is not None:
if node == self.head:
lowercase__ = self.head.get_next()
if node == self.tail:
lowercase__ = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase__ )
@staticmethod
def A__ ( lowerCamelCase__ ) -> None:
'''simple docstring'''
if node.get_next():
lowercase__ = node.previous
if node.get_previous():
lowercase__ = node.next
lowercase__ = None
lowercase__ = None
def A__ ( self ) -> Any:
'''simple docstring'''
return self.head is None
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 |
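# A self-contained three-node sketch of the previous/next pointer wiring the
# doubly linked list above maintains; the DemoNode class and values are
# illustrative stand-ins.
class DemoNode:
    def __init__(self, data):
        self.data = data
        self.previous = None
        self.next = None

demo_a, demo_b, demo_c = DemoNode(1), DemoNode(2), DemoNode(3)
demo_a.next, demo_b.previous = demo_b, demo_a  # link a <-> b
demo_b.next, demo_c.previous = demo_c, demo_b  # link b <-> c

demo_forward = []
demo_node = demo_a
while demo_node:
    demo_forward.append(demo_node.data)
    demo_node = demo_node.next
assert demo_forward == [1, 2, 3]
assert demo_c.previous.previous is demo_a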
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class A ( __UpperCAmelCase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : str = RetriBertTokenizer
lowerCamelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase__ ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase__ , normalizer_state.pop("""type""" ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase__ )
lowercase__ = do_lower_case
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Dict:
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 164 | 1 |
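The tokenizer row above ends with two helpers that lay out special tokens and segment ids for a sentence pair. As a standalone illustration of that layout, here is a minimal sketch; the helper name `build_pair` and the token ids (cls=101, sep=102, the usual BERT vocabulary values) are assumptions for this example only, not part of the row.
def build_pair(token_ids_a, token_ids_b, cls_id=101, sep_id=102):
    # hypothetical helper mirroring build_inputs_with_special_tokens /
    # create_token_type_ids_from_sequences in the snippet above
    ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
    return ids, type_ids

ids, type_ids = build_pair([7, 8], [9])
assert ids == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]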
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=None , __snake_case : List[str]=None , __snake_case : Optional[Any]=None , __snake_case : List[Any]=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ : Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ : Tuple = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ : Union[str, Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=9_9 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=4 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=3_2 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=0.02 , ) -> Tuple:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : str = seq_length
UpperCAmelCase_ : Tuple = is_training
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Tuple = eos_token_id
UpperCAmelCase_ : Tuple = pad_token_id
UpperCAmelCase_ : Any = bos_token_id
UpperCAmelCase_ : int = initializer_range
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase_ : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase_ : Dict = shift_tokens_right(_UpperCamelCase , 1 , 2 )
UpperCAmelCase_ : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Dict = 2_0
UpperCAmelCase_ : int = model_class_name(_UpperCamelCase )
UpperCAmelCase_ : Any = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase_ : int = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCAmelCase_ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : str = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase_ : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase_ : Dict = model.decode(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Union[str, Any] = 2_0
UpperCAmelCase_ : Dict = model_class_name(_UpperCamelCase )
UpperCAmelCase_ : int = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase_ , UpperCAmelCase_ : str = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase_ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : Tuple = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Any = 9_9
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[str] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase_ : str = input_ids.shape[0]
UpperCAmelCase_ : int = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = self._get_config_and_data()
UpperCAmelCase_ : Dict = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
UpperCAmelCase_ : int = lm_model(input_ids=_UpperCamelCase )
UpperCAmelCase_ : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
UpperCAmelCase_ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
UpperCAmelCase_ : List[str] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
UpperCAmelCase_ : Union[str, Any] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase_ : List[Any] = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
UpperCAmelCase_ : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
        input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.floataa ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase (_snake_case , unittest.TestCase , _snake_case ):
'''simple docstring'''
_snake_case : Optional[Any] = True
_snake_case : List[str] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_snake_case : Union[str, Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = FlaxBlenderbotModelTester(self )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(_UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest('JIT Enabled' ):
UpperCAmelCase_ : Optional[int] = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase_ : Dict = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[str] = model_class(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
UpperCAmelCase_ : int = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest('JIT Enabled' ):
UpperCAmelCase_ : List[str] = decode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase_ : int = decode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __UpperCAmelCase ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Dict = model(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : List[str] = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
UpperCAmelCase_ : List[str] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
UpperCAmelCase_ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
UpperCAmelCase_ : Optional[Any] = ['Sam']
UpperCAmelCase_ : Any = tokenizer(_UpperCamelCase , return_tensors='jax' )
UpperCAmelCase_ : List[str] = model.generate(**_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : int = 'Sam is a great name. It means "sun" in Gaelic.'
UpperCAmelCase_ : Any = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
| 145 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
@register_to_config
def __init__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ) -> int:
super().__init__()
UpperCAmelCase_ : str = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase_ : Optional[Any] = torch.zeros(_UpperCamelCase , _UpperCamelCase )
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = torch.nn.Parameter(_UpperCamelCase )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : VQModel
_snake_case : CLIPTextModel
_snake_case : CLIPTokenizer
_snake_case : TransformeraDModel
_snake_case : LearnedClassifierFreeSamplingEmbeddings
_snake_case : VQDiffusionScheduler
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(
vqvae=_UpperCamelCase , transformer=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , scheduler=_UpperCamelCase , learned_classifier_free_sampling_embeddings=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = len(_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else 1
# get prompt text embeddings
UpperCAmelCase_ : str = self.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCAmelCase_ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase_ : str = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase_ : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase_ : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase_ : Dict = prompt_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase_ : List[str] = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase_ : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(_UpperCamelCase , 1 , 1 )
else:
UpperCAmelCase_ : List[Any] = [''] * batch_size
UpperCAmelCase_ : List[Any] = text_input_ids.shape[-1]
UpperCAmelCase_ : Dict = self.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='pt' , )
UpperCAmelCase_ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase_ : Dict = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : List[Any] = negative_prompt_embeds.shape[1]
UpperCAmelCase_ : Dict = negative_prompt_embeds.repeat(1 , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 5.0 , _UpperCamelCase = 1.0 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = 1
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = len(_UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Union[str, Any] = batch_size * num_images_per_prompt
UpperCAmelCase_ : Optional[int] = guidance_scale > 1.0
UpperCAmelCase_ : Any = self._encode_prompt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(_UpperCamelCase )}." )
# get the initial completely masked latents unless the user supplied it
UpperCAmelCase_ : Optional[int] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCAmelCase_ : Tuple = self.transformer.num_vector_embeds - 1
UpperCAmelCase_ : List[Any] = torch.full(_UpperCamelCase , _UpperCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
UpperCAmelCase_ : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_UpperCamelCase , device=self.device )
UpperCAmelCase_ : List[str] = self.scheduler.timesteps.to(self.device )
UpperCAmelCase_ : Union[str, Any] = latents
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the sample if we are doing classifier free guidance
UpperCAmelCase_ : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCAmelCase_ : Dict = self.transformer(_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , timestep=_UpperCamelCase ).sample
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_output.chunk(2 )
UpperCAmelCase_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_UpperCamelCase , dim=1 , keepdim=_UpperCamelCase )
UpperCAmelCase_ : str = self.truncate(_UpperCamelCase , _UpperCamelCase )
# remove `log(0)`'s (`-inf`s)
UpperCAmelCase_ : Optional[int] = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , generator=_UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : str = self.vqvae.config.vq_embed_dim
UpperCAmelCase_ : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCAmelCase_ : int = self.vqvae.quantize.get_codebook_entry(_UpperCamelCase , shape=_UpperCamelCase )
UpperCAmelCase_ : Dict = self.vqvae.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase ).sample
UpperCAmelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : int = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> torch.FloatTensor:
UpperCAmelCase_ , UpperCAmelCase_ : int = torch.sort(_UpperCamelCase , 1 , descending=_UpperCamelCase )
UpperCAmelCase_ : Dict = torch.exp(_UpperCamelCase )
UpperCAmelCase_ : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCAmelCase_ : Tuple = torch.full_like(keep_mask[:, 0:1, :] , _UpperCamelCase )
UpperCAmelCase_ : List[str] = torch.cat((all_true, keep_mask) , dim=1 )
UpperCAmelCase_ : int = keep_mask[:, :-1, :]
UpperCAmelCase_ : Any = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCAmelCase_ : str = log_p_x_0.clone()
UpperCAmelCase_ : Any = -torch.inf # -inf = log(0)
return rv
| 145 | 1 |
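The VQ-Diffusion pipeline above applies classifier-free guidance in log-probability space: it chunks the doubled batch into unconditional and text-conditioned halves, combines them, and renormalizes with logsumexp. A minimal sketch of just that step on made-up tensors follows; the shapes and the guidance scale are illustrative assumptions, only the combination formula mirrors the snippet.
import torch

log_p = torch.log_softmax(torch.randn(2, 5, 3), dim=1)  # [uncond; cond] stacked on dim 0
uncond, cond = log_p.chunk(2)
guidance_scale = 5.0
guided = uncond + guidance_scale * (cond - uncond)
# renormalize so each column is again a valid log-distribution
guided = guided - torch.logsumexp(guided, dim=1, keepdim=True)
assert torch.allclose(guided.exp().sum(dim=1), torch.ones(1, 3), atol=1e-5)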
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
A : Dict = logging.get_logger(__name__)
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''upernet'''
def __init__(self : List[str] , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[Any]=[1, 2, 3, 6] , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=0.4 , _UpperCAmelCase : Any=384 , _UpperCAmelCase : Tuple=256 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=255 , **_UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowercase__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = backbone_config.get("""model_type""" )
lowercase__ = CONFIG_MAPPING[backbone_model_type]
lowercase__ = config_class.from_dict(_UpperCAmelCase )
lowercase__ = backbone_config
lowercase__ = hidden_size
lowercase__ = initializer_range
lowercase__ = pool_scales
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_in_channels
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = loss_ignore_index
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.__class__.model_type
return output
| 305 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A : Any = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( __magic_name__ : str , __magic_name__ : str ) -> None:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ = XLMProphetNetForConditionalGenerationOld.from_pretrained(__magic_name__ )
lowercase__ , lowercase__ = XLMProphetNetForConditionalGeneration.from_pretrained(
__magic_name__ , output_loading_info=__magic_name__ )
else:
lowercase__ = ProphetNetForConditionalGenerationOld.from_pretrained(__magic_name__ )
lowercase__ , lowercase__ = ProphetNetForConditionalGeneration.from_pretrained(
__magic_name__ , output_loading_info=__magic_name__ )
lowercase__ = ["""key_proj""", """value_proj""", """query_proj"""]
lowercase__ = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
lowercase__ = key.split(""".""" )
if attributes[0] == "lm_head":
lowercase__ = prophet
lowercase__ = prophet_old
else:
lowercase__ = prophet.prophetnet
lowercase__ = prophet_old.model
lowercase__ = False
for attribute in attributes:
if attribute in mapping:
lowercase__ = mapping[attribute]
if not hasattr(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) > 0:
lowercase__ = attribute
elif hasattr(__magic_name__ , __magic_name__ ):
lowercase__ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowercase__ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowercase__ = True
break
elif attribute in special_keys and hasattr(__magic_name__ , """in_proj_weight""" ):
lowercase__ = old_model.in_proj_weight.shape[0] // 3
lowercase__ = getattr(__magic_name__ , __magic_name__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ = True
break
if attribute.isdigit():
lowercase__ = model[int(__magic_name__ )]
lowercase__ = old_model[int(__magic_name__ )]
else:
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if old_attribute == "":
lowercase__ = old_model
else:
if not hasattr(__magic_name__ , __magic_name__ ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A : str = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 305 | 1 |
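The conversion script above splits a fused attention projection into query/key/value blocks by slicing thirds of `in_proj_weight`. Here is the same slicing in isolation, with toy dimensions assumed purely for illustration.
import torch
from torch import nn

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] rows
q = nn.Parameter(in_proj_weight[:embed_dim, :])
k = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
assert q.shape == k.shape == v.shape == (embed_dim, embed_dim)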
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=4 , ) -> Optional[int]:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = True
snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _snake_case ( lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : int = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = FlaxBertModelTester(self )
@slow
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = FlaxBertModel.from_pretrained("bert-base-cased" )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(a__ )
| 362 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks.

    RULE 1: push digits on the operand stack.
    RULE 2: push operators on the operator stack.
    RULE 3: ignore left parentheses.
    RULE 4: on a right parenthesis, pop one operator and two operands,
            apply the operator, and push the result.
    RULE 5: the value left on the operand stack is the answer.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_a_left = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_a_left , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 92 | 0 |
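A short trace makes the rules of the two-stack evaluator above concrete; it can be checked against the sample input directly.
# Trace of dijkstras_two_stack_algorithm("(5 + ((4 * 2) * (2 + 3)))"):
#   digits 5, 4, 2, 2, 3 and operators +, *, *, + are pushed as they appear
#   first ')'  pops *, 2, 4  -> pushes 4 * 2 = 8
#   second ')' pops +, 3, 2  -> pushes 2 + 3 = 5
#   third ')'  pops *, 5, 8  -> pushes 8 * 5 = 40
#   fourth ')' pops +, 40, 5 -> pushes 5 + 40 = 45
# so the final peek returns 45:
assert dijkstras_two_stack_algorithm("(5 + ((4 * 2) * (2 + 3)))") == 45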
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="vit_mae"
def __init__( self , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1E-12 , _A=224 , _A=16 , _A=3 , _A=True , _A=16 , _A=512 , _A=8 , _A=2048 , _A=0.75 , _A=False , **_A , ) -> int:
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = decoder_num_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = decoder_num_hidden_layers
SCREAMING_SNAKE_CASE_ = decoder_intermediate_size
SCREAMING_SNAKE_CASE_ = mask_ratio
SCREAMING_SNAKE_CASE_ = norm_pix_loss
| 299 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(number**0.5 )
return number == sq * sq
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
SCREAMING_SNAKE_CASE_ = x_den * y_den * z_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def A__ ( __lowerCamelCase = 35 ):
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = Fraction(0 )
SCREAMING_SNAKE_CASE_ = 42
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
SCREAMING_SNAKE_CASE_ = x_num * y_den + x_den * y_num
SCREAMING_SNAKE_CASE_ = x_den * y_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
SCREAMING_SNAKE_CASE_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
SCREAMING_SNAKE_CASE_ = x_den * x_den * y_den * y_den
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=-1
SCREAMING_SNAKE_CASE_ = x_num * y_num
SCREAMING_SNAKE_CASE_ = x_den * y_num + x_num * y_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
SCREAMING_SNAKE_CASE_ = x_num * x_num * y_num * y_num
SCREAMING_SNAKE_CASE_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
for num, den in unique_s:
total += Fraction(__lowerCamelCase, __lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299 | 1 |
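The search above repeatedly reduces each candidate numerator/denominator pair with gcd before range-testing it, so equivalent fractions collapse to a single set entry. The reduction step in isolation, on toy values:
from math import gcd

num, den = 8, 12
h = gcd(num, den)                # 4
num, den = num // h, den // h
assert (num, den) == (2, 3)      # 8/12 in lowest terms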
def perfect_cube( n: int ) -> bool:
    """Return True if `n` is a perfect cube (checked via a float cube root)."""
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 84 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _lowerCamelCase:
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
_lowercase : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
_lowercase : Optional[int] = UNetaDConditionModel(
sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
_lowercase : Dict = DDPMScheduler(
num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=lowerCamelCase, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
torch.manual_seed(0)
_lowercase : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : List[str] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
_lowercase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
_lowercase : List[str] = UNetaDConditionModel(
sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.4_1_4, time_embedding_act_fn='gelu', time_embedding_dim=32, )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
_lowercase : Optional[int] = DDPMScheduler(
num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=lowerCamelCase, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
torch.manual_seed(0)
_lowercase : str = DDPMScheduler(
num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, )
torch.manual_seed(0)
_lowercase : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : int = self.get_dummy_inputs(lowerCamelCase)
_lowercase : int = inputs['prompt']
_lowercase : Dict = inputs['generator']
_lowercase : Optional[int] = inputs['num_inference_steps']
_lowercase : str = inputs['output_type']
if "image" in inputs:
_lowercase : List[Any] = inputs['image']
else:
_lowercase : List[Any] = None
if "mask_image" in inputs:
_lowercase : Union[str, Any] = inputs['mask_image']
else:
_lowercase : Dict = None
if "original_image" in inputs:
_lowercase : Any = inputs['original_image']
else:
_lowercase : Tuple = None
_lowercase , _lowercase : str = pipe.encode_prompt(lowerCamelCase)
# inputs with prompt converted to embeddings
_lowercase : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
_lowercase : int = image
if mask_image is not None:
_lowercase : str = mask_image
if original_image is not None:
_lowercase : Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : Dict = pipe(**lowerCamelCase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = self.pipeline_class.from_pretrained(lowerCamelCase)
pipe_loaded.to(lowerCamelCase)
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase, lowerCamelCase) is None, F'''`{optional_component}` did not stay set to None after loading.''', )
_lowercase : Dict = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Optional[Any] = inputs['generator']
_lowercase : Any = inputs['num_inference_steps']
_lowercase : List[Any] = inputs['output_type']
# inputs with prompt converted to embeddings
_lowercase : Optional[int] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
_lowercase : str = image
if mask_image is not None:
_lowercase : Optional[int] = mask_image
if original_image is not None:
_lowercase : int = original_image
_lowercase : str = pipe_loaded(**lowerCamelCase)[0]
_lowercase : List[Any] = np.abs(to_np(lowerCamelCase) - to_np(lowerCamelCase)).max()
self.assertLess(lowerCamelCase, 1E-4)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.get_dummy_components()
_lowercase : Any = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Tuple = pipe(**lowerCamelCase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase)
_lowercase : List[str] = self.pipeline_class.from_pretrained(lowerCamelCase)
pipe_loaded.to(lowerCamelCase)
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
_lowercase : int = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Tuple = pipe_loaded(**lowerCamelCase)[0]
_lowercase : str = np.abs(to_np(lowerCamelCase) - to_np(lowerCamelCase)).max()
self.assertLess(lowerCamelCase, 1E-4)
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ :Optional[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Any = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Tuple = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case : List[Any] = logging.get_logger(__name__)
class A__(a_ ):
"""simple docstring"""
_A : Optional[Any] = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BICUBIC , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = None , _lowercase = None , _lowercase = True , **_lowercase , ) -> None:
super().__init__(**_lowercase )
a_ : Optional[Any] = size if size is not None else {"""height""": 384, """width""": 384}
a_ : List[str] = get_size_dict(_lowercase , default_to_square=_lowercase )
a_ : str = do_resize
a_ : Optional[int] = size
a_ : Dict = resample
a_ : Optional[int] = do_rescale
a_ : Dict = rescale_factor
a_ : int = do_normalize
a_ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a_ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
a_ : Any = do_convert_rgb
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BICUBIC , _lowercase = None , **_lowercase , ) -> np.ndarray:
a_ : Union[str, Any] = get_size_dict(_lowercase , default_to_square=_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
a_ : List[str] = (size["""height"""], size["""width"""])
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> Optional[Any]:
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> PIL.Image.Image:
a_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
a_ : Any = resample if resample is not None else self.resample
a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
a_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
a_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
a_ : Optional[Any] = image_std if image_std is not None else self.image_std
a_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ : str = size if size is not None else self.size
a_ : Tuple = get_size_dict(_lowercase , default_to_square=_lowercase )
a_ : Optional[int] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ : Optional[Any] = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
a_ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
a_ : Optional[int] = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
a_ : Union[str, Any] = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
a_ : str = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
a_ : Optional[Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
a_ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=_lowercase )
return encoded_outputs
| 248 | 0 |
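The image processor above applies a rescale step followed by per-channel normalization to each image. A minimal numpy sketch of those two stages follows; the mean/std values here are illustrative assumptions, not the OPENAI_CLIP constants the processor defaults to.
import numpy as np

image = np.random.randint(0, 256, (3, 4, 4)).astype(np.float32)  # C, H, W
image = image * (1 / 255)                       # rescale to [0, 1]
mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
image = (image - mean) / std                    # normalize per channel
assert image.min() >= -1.0 and image.max() <= 1.0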
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
a__ : Tuple = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is kept in the raw text, so that it matches in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = bool(self.vocab_file)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
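
# Layout of the special tokens built by the two methods above (token_type_ids underneath):
#   single sequence:   [CLS] A [SEP]          ->  0 0 ... 0
#   pair of sequences: [CLS] A [SEP] B [SEP]  ->  0 0 ... 0 1 ... 1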
| 370 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = bool(self.vocab_file)

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
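
# Minimal translation-tokenization sketch (the checkpoint name comes from the map above):
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   enc = tok("Hello", return_tensors="pt")
#   # With src_lang="en_XX" (the default above), input_ids are suffixed with [</s>, en_XX],
#   # matching the template installed by set_src_lang_special_tokens.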
| 19 | 0 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    '''simple docstring'''
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
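
# Illustrative renames produced by the mapping above (the keys are hypothetical):
#   "decoder.blocks.0.mlp.0.weight"    -> "decoder.layers.0.fc1.weight"
#   "encoder.blocks.3.attn.query.bias" -> "encoder.layers.3.self_attn.q_proj.bias"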
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
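
# Note: `lin_layer.weight.data = emb.weight.data` shares storage with the embedding,
# so the resulting head computes logits = hidden @ embedding_weight.T without
# duplicating parameters (classic input/output embedding tying).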
def _download(url: str, root: str = ".") -> bytes:
    '''simple docstring'''
    # `root` defaulting to the current directory is an assumption, so the
    # one-argument call in the conversion function below works.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__A : Any = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
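
# Example invocation (the script file name and paths are illustrative):
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en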
| 260 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
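
# `list_field` exists because dataclasses forbid mutable default values; the
# default_factory builds a fresh list per instance, e.g.:
#   bar_int: List[int] = list_field(default=[1, 2, 3])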
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    """simple docstring"""

    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
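
# The flow these tests exercise, in miniature (a sketch):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux"], look_for_args_file=False
#   )
#   example  # -> BasicExample(foo=1, bar=0.5, baz="quux", flag=False)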
| 260 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 304 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """
    Calculation of the Easter date for a given year, via Gauss's Easter algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
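
# Worked examples (sanity checks against the Gregorian computus):
#   gauss_easter(2023) -> datetime(2023, 4, 9)
#   gauss_easter(2024) -> datetime(2024, 3, 31)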
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 304 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
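
# Example: the three-cell "blinker" oscillates between a vertical and a horizontal bar:
#   new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]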
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 4 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
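
    # The dump is a plain list indexed by token id: counts[token_id] -> number of
    # occurrences in the binarized dataset, usable later to bias MLM masking.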
| 58 | 0 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """simple docstring"""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length,
        prefix_inner_dim,
        prefix_hidden_dim=None,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size, device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size=5,
        entry_length=67,
        temperature=1.0,
        eos_token_id=None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
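
# Hedged usage sketch (dimensions and the eos token id are assumptions):
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
#   tokens, lengths = decoder.generate_beam(
#       input_embeds=torch.randn(1, 77, 768), device="cpu", eos_token_id=50256)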
| 164 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 164 | 1 |
'''simple docstring'''
_lowercase : Union[str, Any] = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 93 |
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Counts hybrid integers p**q * q**p (p, q distinct primes) <= base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 93 | 1 |
from ..utils import DummyObject, requires_backends
# Dummy placeholders raised in place of the ONNX Stable Diffusion pipelines
# when the required backends are missing (class names restored to match the
# corresponding real pipelines).
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 124 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
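    # Worked example (silicon-like values assumed for illustration; they are
    # not part of the original file): N_D = N_A = 1e17 and n_i = 1.5e10 at
    # T = 300 K give about 0.0259 V * ln(4.4e13), i.e. roughly 0.81 V.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))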
| 124 | 1 |
"""simple docstring"""
def actual_power(a: int, b: int):
    """Divide-and-conquer computation of a**b for integer a and b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handles negative exponents; int(b / 2) truncates toward zero, so the
    recursion in actual_power also terminates for negative b."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
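    # Extra sanity checks added for illustration:
    assert power(2, 10) == 1024
    assert abs(power(2, -2) - 0.25) < 1e-12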
print(power(-2, -3)) | 286 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, 'isdigit'):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith('.jpg')]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d', pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg['std'])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch['label'])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps', type=str, default=None, help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.', )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.', )
    parser.add_argument(
        '--project_dir', type=str, default='logs', help='Location on where to store experiment tracking logs` and relevent project information', )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
main()
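
# Typical invocation (illustrative only; the script name and data path are assumptions):
#   accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch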
| 78 | 0 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs, ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
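
# Usage sketch (illustrative; not part of the original module):
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#   ds = SparkDatasetReader(spark.range(100), streaming=False).read()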
| 371 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps an image processor and a tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
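
# Usage sketch (checkpoint name and variables assumed for illustration):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")
# `inputs` then contains both the tokenizer fields and the image `pixel_values`.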
| 38 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
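
# Usage sketch (illustrative): TrOCRConfig() reproduces the decoder settings of
# microsoft/trocr-base-handwritten, and the attribute_map lets generic code read
# mapped names, e.g. TrOCRConfig(decoder_layers=6).num_hidden_layers == 6.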
| 22 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')])
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 53 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '<unk>',
            '[CLS]',
            '[SEP]',
            'want',
            'unwanted',
            'wa',
            'un',
            'running',
            ',',
            'low',
            'l',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize('<unk> UNwanted , running')
        self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  '), ['hello', '!', 'how', 'are', 'you', '?'])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            'Hello',
            '(',
            'bracket',
            ')',
            'and',
            'side',
            '@-@',
            'scrolled',
            '[',
            'and',
            ']',
            'Henry',
            "'s",
            '$',
            '5',
            '@,@',
            '000',
            'with',
            '3',
            '@.@',
            '34',
            'm',
            '.',
            'What',
            "'s",
            'up',
            '!',
            '?',
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(['new1', 'new2'])
        tokenizer.move_added_token('new1', 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1'), [1])
        self.assertEqual(tokenizer.decode([1]), 'new1')
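
# Recap of the behavior exercised above (illustration only): adding two tokens
# grows the vocabulary by exactly 2, and move_added_token('new1', 1) re-assigns
# the added token to id 1, so encode('new1') == [1] and decode([1]) == 'new1'.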
| 172 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
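    # Quick checks added for illustration: the first three rows of the triangle.
    assert generate_pascal_triangle(3) == [[1], [1, 1], [1, 2, 1]]
    assert generate_pascal_triangle_optimized(3) == [[1], [1, 1], [1, 2, 1]]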
| 172 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    control characters and whitespace that byte-level BPE cannot handle directly.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
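
# Illustration (added; not part of the original file): the table returned by
# bytes_to_unicode() is a bijection over all 256 byte values, and maps the
# space byte 32 to the printable stand-in 'Ġ' (chr(288)) used by byte-level BPE.
assert len(bytes_to_unicode()) == 256
assert bytes_to_unicode()[32] == chr(288)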
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs | 320 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 320 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        ) | 196 |
def factorial(num: int) -> int:
    """Return num! by iterated multiplication."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split `number` into its decimal digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler 20: return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
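# Sanity checks: 10! = 3628800, whose digits sum to 27, and 648 is the
# well-known answer for 100! (Project Euler problem 20).
assert solution(10) == 27
assert solution(100) == 648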
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip()))) | 196 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
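# Note: the BLIP-2-style vision encoder learns biases for q and v only and
# keeps the key bias fixed at zero, which is why the checkpoint stores just
# q_bias and v_bias and the k slot is zero-filled above.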
def get_blipa_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to the Transformers design."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 90 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: c = sqrt(K / rho), where K is the adiabatic bulk
    modulus of the fluid (Pa) and rho is its density (kg/m^3).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
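# Example (assumed round values for water near 20 degrees C: density
# 998 kg/m^3, bulk modulus 2.15e9 Pa); the formula gives roughly 1468 m/s:
assert 1400 < speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) < 1550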
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
def __init__( self : Any ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int]=13 ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Union[str, Any]=99 ,lowerCamelCase__ : int=16 ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : Union[str, Any]="relu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : List[Any]=20 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : Any=1 ,lowerCamelCase__ : str=0 ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Tuple = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : int = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : int = encoder_layerdrop
_UpperCamelCase : Any = decoder_layerdrop
_UpperCamelCase : Any = max_position_embeddings
_UpperCamelCase : Any = eos_token_id
_UpperCamelCase : Optional[int] = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCamelCase : Optional[Any] = self.eos_token_id # Eos Token
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCamelCase : Tuple = input_ids.clamp(self.pad_token_id + 1 )
_UpperCamelCase : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCamelCase : int = self.get_config()
_UpperCamelCase : Union[str, Any] = prepare_mam_aaa_inputs_dict(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
return config, inputs_dict
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,encoder_layerdrop=self.encoder_layerdrop ,decoder_layerdrop=self.decoder_layerdrop ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,)
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = MaMaaaModel(config=lowerCamelCase__ ).get_decoder().to(lowerCamelCase__ ).eval()
_UpperCamelCase : Tuple = inputs_dict['input_ids']
_UpperCamelCase : Tuple = inputs_dict['attention_mask']
_UpperCamelCase : Optional[Any] = inputs_dict['head_mask']
# first forward pass
_UpperCamelCase : Any = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,head_mask=lowerCamelCase__ ,use_cache=lowerCamelCase__ )
_UpperCamelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCamelCase : Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) ,2 )
# append to next input_ids and
_UpperCamelCase : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
_UpperCamelCase : Dict = torch.cat([attention_mask, next_attn_mask] ,dim=-1 )
_UpperCamelCase : int = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )['last_hidden_state']
_UpperCamelCase : str = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,past_key_values=lowerCamelCase__ )[
'last_hidden_state'
]
# select random slice
_UpperCamelCase : str = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_UpperCamelCase : int = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCamelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-2 ) )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : List[str] = MaMaaaModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_UpperCamelCase : Tuple = model(**lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = outputs.encoder_last_hidden_state
_UpperCamelCase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase : List[str] = model.get_encoder()
encoder.save_pretrained(lowerCamelCase__ )
_UpperCamelCase : int = MaMaaaEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCamelCase : Dict = encoder(inputs_dict['input_ids'] ,attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase : Optional[int] = model.get_decoder()
decoder.save_pretrained(lowerCamelCase__ )
_UpperCamelCase : List[str] = MaMaaaDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCamelCase : List[str] = decoder(
input_ids=inputs_dict['decoder_input_ids'] ,attention_mask=inputs_dict['decoder_attention_mask'] ,encoder_hidden_states=lowerCamelCase__ ,encoder_attention_mask=inputs_dict['attention_mask'] ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : str = MaMaaaModelTester(self )
_UpperCamelCase : str = ConfigTester(self ,config_class=lowerCamelCase__ )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
_UpperCamelCase : Dict = model_class.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertEqual(info['missing_keys'] ,[] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
_UpperCamelCase : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCamelCase : List[Any] = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
if not self.is_encoder_decoder:
_UpperCamelCase : Tuple = inputs['input_ids']
del inputs["input_ids"]
else:
_UpperCamelCase : List[Any] = inputs['input_ids']
_UpperCamelCase : Tuple = inputs.get('decoder_input_ids' ,lowerCamelCase__ )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = model.get_input_embeddings()
if not self.is_encoder_decoder:
_UpperCamelCase : List[Any] = wte(lowerCamelCase__ )
else:
_UpperCamelCase : Dict = wte(lowerCamelCase__ )
_UpperCamelCase : List[str] = wte(lowerCamelCase__ )
with torch.no_grad():
model(**lowerCamelCase__ )[0]
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase : Union[str, Any] = input_dict['input_ids']
_UpperCamelCase : int = input_ids.ne(1 ).to(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = MaMaaaForConditionalGeneration(lowerCamelCase__ ).eval().to(lowerCamelCase__ )
if torch_device == "cuda":
model.half()
model.generate(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )
model.generate(num_beams=4 ,do_sample=lowerCamelCase__ ,early_stopping=lowerCamelCase__ ,num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
@cached_property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(lowerCamelCase__ )
_UpperCamelCase : List[Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
_UpperCamelCase : Any = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
_UpperCamelCase : str = prepare_mam_aaa_inputs_dict(model.config ,lowerCamelCase__ ,lowerCamelCase__ )
with torch.no_grad():
_UpperCamelCase : Any = model(**lowerCamelCase__ )[0]
_UpperCamelCase : List[str] = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape ,lowerCamelCase__ )
# change to expected output here
_UpperCamelCase : int = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Any = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(lowerCamelCase__ )
# change to intended input
_UpperCamelCase : Optional[int] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
_UpperCamelCase : List[Any] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
_UpperCamelCase : Optional[Any] = prepare_mam_aaa_inputs_dict(model.config ,lowerCamelCase__ ,lowerCamelCase__ )
with torch.no_grad():
_UpperCamelCase : int = model(**lowerCamelCase__ )[0]
_UpperCamelCase : List[Any] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape ,lowerCamelCase__ )
# change to expected output here
_UpperCamelCase : Optional[int] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ,src_lang='fr' ,tgt_lang='en' )
_UpperCamelCase : Optional[Any] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
_UpperCamelCase : List[str] = tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors='pt' )
_UpperCamelCase : List[Any] = model.generate(
input_ids=dct['input_ids'].to(lowerCamelCase__ ) ,attention_mask=dct['attention_mask'].to(lowerCamelCase__ ) ,num_beams=5 ,forced_bos_token_id=tokenizer.get_lang_id('en' ) ,)
_UpperCamelCase : Tuple = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
_UpperCamelCase : int = tokenizer.batch_decode(
hypotheses_batch.tolist() ,clean_up_tokenization_spaces=lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )
assert generated == expected_en
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
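# Note: _LazyModule defers the heavy submodule imports registered in
# _import_structure; accessing e.g. GPTNeoXJapaneseModel on this package
# triggers the torch-dependent import only at that moment, which keeps a
# plain `import transformers` fast.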
| 236 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BeitFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 | """simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the text, strip non-letters and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are merged, giving a 25-letter alphabet that fits a 5x5 table
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
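# Round-trip sketch (key and message are illustrative choices): letters are
# upper-cased and the repeated E in "tree" gets an X inserted, so decoding
# returns the prepared plaintext rather than the original string.
assert (
    decode(encode("Hide the gold in the tree stump", "playfair example"), "playfair example")
    == "HIDETHEGOLDINTHETREXESTUMP"
)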
| 150 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
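# Example (a small sketch): overriding a few defaults roughly mirrors the
# albert-base shape, and class-level metadata stays fixed:
# config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
# assert config.model_type == "albert" and config.num_hidden_groups == 1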
| 358 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=2 , ):
'''simple docstring'''
UpperCamelCase : List[str] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = patch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : str = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : List[Any] = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Union[str, Any] = scope
UpperCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
UpperCamelCase : int = num_patches + 2
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Tuple = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TFDeiTModel(config=A_ )
UpperCamelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = TFDeiTForMaskedImageModeling(config=A_ )
UpperCamelCase : Optional[Any] = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Dict = 1
UpperCamelCase : Optional[Any] = TFDeiTForMaskedImageModeling(A_ )
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Any = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.type_sequence_label_size
UpperCamelCase : List[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : List[Any] = 1
UpperCamelCase : Optional[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs
UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = TFDeiTModelTester(self )
UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Dense ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(A_ )
UpperCamelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : List[str] = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
UpperCamelCase : str = model(**A_ )
# verify the logits
UpperCamelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase : Tuple = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 140 | 0 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that returns vertices in post-order (finish time)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph to collect one component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS order on the graph, then DFS on the reverse."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
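# Example on test_graph_1 above: 0 -> 2 -> 1 -> 0 form one cycle, while 3 and
# 4 can be reached but never lead back, so they are singleton components:
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]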
| 173 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
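# Example: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the feature
# encoder emits one frame per 5 * 2**6 = 320 raw audio samples, i.e.
# UniSpeechConfig().inputs_to_logits_ratio == 320.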
| 173 | 1 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """
    Check rotational equilibrium. For 2D vectors, numpy's cross product returns
    the scalar z-component of each moment r x F; the body is in equilibrium
    when these moments sum to approximately zero.
    """
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


# Test to check if it works
forces = array(
    [
        polar_force(718.4, 180 - 30),
        polar_force(879.54, 45),
        polar_force(100, -90),
    ]
)
location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)

# Problem 1 in image_data/2D_problems.jpg
forces = array(
    [
        polar_force(30 * 9.81, 15),
        polar_force(215, 180 - 45),
        polar_force(264, 90 - 30),
    ]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)

# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
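    # Extra negative check (illustrative addition, not in the original file):
    # a single off-axis force with nothing to balance its moment should fail.
    forces = array([polar_force(10, 90)])
    location = array([[1, 0]])
    assert not in_static_equilibrium(forces, location)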
| 352 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")

    if input_num <= 0:
        raise ValueError("Input must be positive")

    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
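    # Quick sanity check (illustrative addition; the name `sum_of_divisors`
    # above is a reconstruction, as the original identifier was lost):
    # 28 is a perfect number, so its proper divisors sum back to 28.
    assert sum_of_divisors(28) == 28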
| 112 | 0 |
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/transformers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    """Return the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap `key` so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: constants, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; if `check_only=True`, just check they are sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
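    # Minimal illustration of the grouping rule used by `sort_objects`
    # (hypothetical input, not part of the original script): constants first,
    # then classes, then functions, alphabetized ignoring case and underscores.
    assert sort_objects(["load_tool", "Agent", "OPENAI_KEY", "_helper"]) == [
        "OPENAI_KEY",
        "Agent",
        "_helper",
        "load_tool",
    ]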
| 309 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 87 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
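# Illustrative usage sketch (added; not part of the original file). The
# defaults above mirror the ViViT-B 16x2 Kinetics-400 checkpoint:
#
#     config = VivitConfig()
#     assert config.hidden_size == 768 and config.num_frames == 32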
| 42 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to the target by counting matching positions."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # Replace one random position with a random gene.
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 42 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 344 |
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
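    # Deterministic sketch (illustrative addition, not in the original file):
    # the 3-vertex example from the comments above, built without input().
    INF = float("inf")
    example_graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
    example_dist, _ = floyd_warshall(example_graph, 3)
    assert example_dist[1][2] == 2.0 and example_dist[2][1] == 1.0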
| 344 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__a = "src/diffusers"
__a = "."
# This is to make sure the diffusers module imported is the one in the repo.
__a = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
__a = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 365 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 43 | 0 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
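    # Worked check (illustrative addition): the binomial coefficient is
    # C(4, 2) = 6, and 6 * 0.75**2 * 0.25**2 = 0.2109375.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12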
| 322 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 217 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Check whether `tree` is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
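    # Usage sketch (illustrative addition, not in the original file): a valid
    # BST and one that violates the left-subtree < root < right-subtree rule.
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)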
| 250 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 250 | 1 |
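A minimal usage sketch for the composite config above; the keyword values shown are illustrative assumptions, not required settings:

text_cfg = Pix2StructTextConfig()
vision_cfg = Pix2StructVisionConfig()
cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg, is_vqa=False)
assert cfg.to_dict()["model_type"] == "pix2struct"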
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 8 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    # Map each lowercase letter to its position in the alphabet (a=1 ... z=26).
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # Map each position back to its lowercase letter.
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
| 8 | 1 |
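A quick round-trip check of the a1z26 cipher above (the sample strings are arbitrary):

print(encode("hello"))  # [8, 5, 12, 12, 15]
assert decode(encode("abc")) == "abc"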
'''simple docstring'''
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        # Advance the internal state: seed = (a * seed + c) mod m.
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 361 |
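Since the update rule seed = (a * seed + c) mod m is deterministic, two generators built with the same parameters and seed produce identical streams; a small sketch:

a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
assert [a.next_number() for _ in range(5)] == [b.next_number() for _ in range(5)]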
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 337 | 0 |
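A brief usage sketch for the auto classes above; the checkpoint name is an illustrative assumption (any checkpoint whose architecture appears in the mapping works):

from transformers import FlaxAutoModel

# Resolves the concrete class (here FlaxBertModel) from the checkpoint's config.
model = FlaxAutoModel.from_pretrained("bert-base-uncased")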
'''simple docstring'''
def gnome_sort(lst):
    """Sort a list in place using the gnome sort algorithm."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Out of order: swap the pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 349 |
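A quick demonstration of gnome_sort above; it runs in O(n^2) worst-case time and O(n) on already-sorted input:

print(gnome_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]
print(gnome_sort([]))               # []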
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184 |
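The module above uses a lazy-import pattern: heavy submodules are imported only when one of their exported names is first accessed. A minimal standalone sketch of the same idea (names here are hypothetical, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names to their submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)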
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered sequences from `array` (with repetition) that sum to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoization keyed on the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up variant of the same count."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 184 | 1 |
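All three variants above compute the same count; the bottom-up version also avoids Python's recursion limit. A small consistency check (there are 9 ordered ways to write 5 with parts from {1, 2, 5}):

n, array, target = 3, [1, 2, 5], 5
assert combination_sum_iv(n, array, target) == 9
assert combination_sum_iv_dp_array(n, array, target) == 9
assert combination_sum_iv_bottom_up(n, array, target) == 9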
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 |
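A worked example for maximum_non_adjacent_sum above: in [1, 5, 3, 7, 2, 2, 6] the best non-adjacent picks are 5, 7 and 6:

assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18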
'''simple docstring'''
def triangle_number_generator():
    # Generate triangle numbers T(n) = n * (n + 1) / 2.
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    # Count divisors via the prime factorization: product of (multiplicity + 1).
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 89 | 0 |
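To sanity-check count_divisors above: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28), and it is the first triangle number with more than five divisors:

assert count_divisors(28) == 6
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28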
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            # GenerationConfig is not JSON-serializable as-is; convert it to a plain dict.
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 365 |
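A brief usage sketch for the dataclass above (argument values are illustrative assumptions):

args = Seq2SeqTrainingArguments(
    output_dir="out",            # required by the TrainingArguments base class
    predict_with_generate=True,  # compute generative metrics (ROUGE, BLEU) at eval time
    generation_max_length=128,
    generation_num_beams=4,
)
print(args.to_dict()["generation_num_beams"])  # 4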
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; copies of the same prompt are yielded sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom stopping criteria: stop once every generation contains an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(code):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), code)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions per task and gather them across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 48 | 0 |
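The code_eval metric used above reports pass@k: the probability that at least one of k sampled completions passes the unit tests. With n samples per task of which c are correct, the unbiased estimator from the Codex paper (Chen et al., 2021) is 1 - C(n-c, k) / C(n, k); a minimal sketch:

from math import comb


def pass_at_k(n: int, c: int, k: int) -> float:
    # If fewer than k samples are incorrect, some draw of k must contain a correct one.
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)


print(pass_at_k(n=20, c=5, k=1))  # 0.25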
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name),
                            item_path,
                            secondary_filename,
                            special_strings,
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 224 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : int = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase_ : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ : str = layer_type
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ ,lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str={} ):
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
lowerCAmelCase_ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCamelCase_ ( ) -> List[str]:
"""simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
| 224 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __snake_case( _lowerCAmelCase ) -> str:
re.sub("""<n>""" , """""" , _lowerCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCAmelCase ) )
| 43 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    """simple docstring"""
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts." )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ):
        input_ids = reader_input["""input_ids"""]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(input_ids )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 43 | 1 |
def _a ( num: int ) -> bool:
    '''Check whether an integer reads the same forwards and backwards.

    >>> _a(121)
    True
    >>> _a(-121)
    False
    >>> _a(10)
    False
    '''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117 |
from __future__ import annotations
def generate_all_permutations( sequence: list[int | str] ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
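# Illustrative cross-check (added sketch, not in the original script): the backtracking
# above visits exactly n! orderings of an n-element sequence, matching itertools.
import itertools
assert len(list(itertools.permutations([3, 1, 2, 4]))) == 24
assert len(list(itertools.permutations(["A", "B", "C"]))) == 6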
| 117 | 1 |
'''simple docstring'''
class FlowNetwork:
"""simple docstring"""
    def __init__( self : List[Any] ,graph ,sources ,sinks ):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources ,sinks )
        self.verticesCount = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self : Optional[Any] ,sources ,sinks ):
        if isinstance(sources ,int ):
            sources = [sources]
        if isinstance(sinks ,int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 ,0 )
            self.graph.insert(0 ,[0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self : List[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm( self : int ,algorithm ):
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
"""simple docstring"""
    def __init__( self : Union[str, Any] ,flow_network ):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute( self : str ):
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm( self : List[str] ):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor ):
"""simple docstring"""
    def __init__( self : List[Any] ,flow_network ):
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow( self : Optional[int] ):
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''' )
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor ):
"""simple docstring"""
    def __init__( self : Union[str, Any] ,flow_network ):
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm( self : Dict ):
        self.heights[self.source_index] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 ,vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self : str ,vertex_index ):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index ,neighbour_index )
            self.relabel(vertex_index )
    def push( self : Any ,from_index ,to_index ):
        preflow_delta = min(
            self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
    def relabel( self : str ,vertex_index ):
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
lowerCAmelCase__ = [0]
lowerCAmelCase__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCAmelCase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCAmelCase__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCAmelCase__ = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
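# Sanity note (added for clarity, not in the original script): in the sample graph above
# the only source-to-sink route is 0 -> 1 -> 2 -> 3, whose bottleneck edge 1 -> 2 has
# capacity 6, so the script is expected to print "maximum flow is 6".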
| 52 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = 'luke'
    def __init__( self : int ,vocab_size=5_0_2_6_7 ,entity_vocab_size=5_0_0_0_0_0 ,hidden_size=7_6_8 ,entity_emb_size=2_5_6 ,num_hidden_layers=1_2 ,num_attention_heads=1_2 ,intermediate_size=3_0_7_2 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_1_2 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1e-1_2 ,use_entity_aware_attention=True ,classifier_dropout=None ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
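# Design note (added for clarity): LUKE deliberately keeps `entity_emb_size` (256 above)
# smaller than `hidden_size` (768); the model projects entity embeddings up to the hidden
# size, which keeps the 500k-entry entity vocabulary affordable in memory.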
| 52 | 1 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution( sides_number: int , dice_number: int ) -> list[int]:
    '''Count, for every attainable total, how many ways it can be rolled.'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    '''Probability that Peter (nine 4-sided dice) rolls strictly more than Colin (six 6-sided dice).'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
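# Reference note (added; please verify against Project Euler independently): the published
# answer to problem 205, which this script computes, is 0.5731441.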
| 318 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
    def __init__(self , module , rank ):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward(self , input , *args , **kwargs ):
        return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
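# Background note (added for clarity): bitsandbytes packs two 4-bit values into each uint8
# storage element, which is why the dtype assertions in the tests above check torch.uint8
# and why the memory footprint is expected to shrink by the stated relative factor.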
| 318 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCamelCase = """</w>"""
_lowerCamelCase = """@@ """
def get_pairs( word ) -> set:
    """Return the set of adjacent symbol pairs found in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
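# For example (illustrative): get_pairs(("l", "o", "w", "</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "</w>")}, i.e. every adjacent pair that BPE may merge.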
# Speech2Text2 has no max input length
_lowerCamelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self ,_snake_case ,_snake_case="<s>" ,_snake_case="<pad>" ,_snake_case="</s>" ,_snake_case="<unk>" ,_snake_case=False ,_snake_case=None ,**_snake_case ,):
super().__init__(
unk_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,pad_token=_snake_case ,do_lower_case=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Optional[Any] = do_lower_case
with open(_snake_case ,encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Optional[Any] = json.load(_snake_case )
UpperCAmelCase_ : str = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
else:
with open(_snake_case ,encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ : Dict = merges_handle.read().split("\n" )[:-1]
UpperCAmelCase_ : List[Any] = [tuple(merge.split()[:2] ) for merge in merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[str] = {}
@property
    def vocab_size( self ):
        return len(self.decoder )
    def get_vocab( self ):
        return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self ,token ):
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES ,"" )
        word = word.replace(" " ,BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize( self ,text ):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding." )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id( self ,token ):
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self ,index ):
        result = self.decoder.get(index ,self.unk_token )
        return result
    def convert_tokens_to_string( self ,tokens ):
        string = " ".join(tokens )
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB ) )
        return string
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file ,"w" ,encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + "\n" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file ,"w" ,encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return (vocab_file, merges_file)
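# Round-trip note (added for clarity): the BPE step above marks every non-final piece of a
# word with the "@@ " continuation marker, and convert_tokens_to_string reverses this by
# removing the marker, so decoding an encoding recovers the original words (modulo
# lower-casing when `do_lower_case` is set).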
| 356 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_lowerCamelCase = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels : bool = None
    drop_metadata : bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio" , label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 67 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a =logging.get_logger(__name__)
a ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self : Any ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase' ,do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_a : List[int] ,token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
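# Layout note (added for clarity): per the two methods above, a single sequence is encoded
# as [CLS] A [SEP] with all-zero token_type_ids, while a pair becomes [CLS] A [SEP] B [SEP]
# with token_type_ids of 0 for the first segment and 1 for the second.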
| 73 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 368 |
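The CLI above works because each sub-command parser registers a `func` default that `main` later dispatches on. A minimal stdlib-only sketch of that pattern (the command name and handler here are illustrative):

from argparse import ArgumentParser

parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
hello = subparsers.add_parser("hello")
hello.set_defaults(func=lambda args: print("hello"))  # the dispatch target

args = parser.parse_args(["hello"])
args.func(args)  # prints "hello"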
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Reduce x_num/x_den + y_num/y_den + z_num/z_den to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum the numerator and denominator of the total over all unique reduced triples."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 289 | 0 |
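A quick sanity check of the helpers above (a sketch that relies only on the functions defined in this snippet): 1/2 + 1/3 + 1/6 reduces to 1/1.

assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)   # 1/2 + 1/3 + 1/6 == 1/1
assert is_sq(36) and not is_sq(35)             # perfect-square test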
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :str = logging.get_logger(__name__)
lowercase__ :Tuple = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str ='''megatron-bert'''
def __init__( self ,A__=2_9_0_5_6 ,A__=1_0_2_4 ,A__=2_4 ,A__=1_6 ,A__=4_0_9_6 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=2 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__="absolute" ,A__=True ,**A__ ,):
super().__init__(pad_token_id=A__ ,**A__)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
| 101 |
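A minimal sketch of instantiating the config above (assumes `transformers` is installed; the asserted values are simply the defaults from the signature):

from transformers import MegatronBertConfig

config = MegatronBertConfig()
assert (config.vocab_size, config.hidden_size, config.num_attention_heads) == (29056, 1024, 16)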
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class | 6 | 0 |
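A minimal sketch of the merge behaviour in `__call__` above, using plain dicts in place of real tokenizer and image-processor outputs (all values are illustrative):

encoding = {"input_ids": [[101, 102]]}                      # stand-in for tokenizer output
image_features = {"pixel_values": [[0.0]]}                  # stand-in for image-processor output
encoding["pixel_values"] = image_features["pixel_values"]   # the text-and-images branch
assert set(encoding) == {"input_ids", "pixel_values"}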
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
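# Sanity notes on drop_path (illustrative, safe to remove): with drop_prob=0.0 or in
# eval mode the input is returned unchanged; otherwise each sample in the batch is
# zeroed with probability drop_prob, and survivors are rescaled by 1/keep_prob so the
# expected value of the output matches the input.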
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings from pixel values or intermediate feature maps."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, i.e. layer norm over the channel dimension."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # the token mixer: average pooling minus the identity
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
_lowerCAmelCase : Any = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase : List[Any] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , __A , )
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 359 |
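A minimal forward-pass sketch for the classification model above (a sketch, assuming `torch` and `transformers` are installed; the config is randomly initialized, so no checkpoint download is needed):

import torch
from transformers import PoolFormerConfig, PoolFormerForImageClassification

config = PoolFormerConfig(num_labels=3)
model = PoolFormerForImageClassification(config).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224)).logits
assert logits.shape == (1, 3)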
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180,
                 depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8,
                 mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
                 use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
                 upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle",
                 **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 308 | 0 |
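A short usage sketch for the config above (assumes `transformers` is installed; `num_layers` follows from the six-entry default `depths`):

from transformers import Swin2SRConfig

config = Swin2SRConfig(upscale=4)
assert config.num_layers == len(config.depths) == 6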
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowercase : str = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 93 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(self, processed_features, padding=True, max_length=None, truncation=False,
            pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD,
             pad_to_multiple_of=None, return_attention_mask=None):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 261 | 0 |
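A minimal numpy-only sketch of the right-padding-with-attention-mask behaviour implemented in `_pad` above (the sequence and target length are illustrative):

import numpy as np

seq = np.array([0.1, 0.2, 0.3])
difference = 5 - len(seq)                                    # pad up to max_length=5
padded = np.pad(seq, (0, difference), constant_values=0.0)
mask = np.pad(np.ones(len(seq), dtype=np.int32), (0, difference))
assert padded.tolist() == [0.1, 0.2, 0.3, 0.0, 0.0]
assert mask.tolist() == [1, 1, 1, 0, 0]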
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
lowerCAmelCase :Optional[Any] = '''.'''
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
lowerCAmelCase :int = []
lowerCAmelCase :str = []
with open(doctest_file_path) as fp:
for line in fp:
lowerCAmelCase :Union[str, Any] = line.strip()
lowerCAmelCase :Optional[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
lowerCAmelCase :Union[str, Any] = '''\n'''.join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''') | 350 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser | 275 | 0 |
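For context, the custom flag registered above is meant to be used inside docstrings, e.g. (a sketch):

# Example docstring line using the flag registered above:
#     >>> rng.random()  # doctest: +IGNORE_RESULT
# With the custom checker installed, whatever output that line produces is accepted.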
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the image processor is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
def A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
UpperCAmelCase = image_processing(lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(lowercase , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(lowercase , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCAmelCase = image_processing(images=lowercase , annotations=lowercase , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , lowercase )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
UpperCAmelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase )
UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase , atol=1E-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase ) )
# verify class_labels
UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase ) )
# verify orig_size
UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase ) )
# verify size
UpperCAmelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase ) )
@slow
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , lowercase )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
UpperCAmelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase )
UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase , atol=1E-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase ) )
# verify class_labels
UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase ) )
# verify masks
UpperCAmelCase = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowercase )
# verify orig_size
UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase ) )
# verify size
UpperCAmelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase ) )
| 34 |
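A worked instance of the shortest-edge resize rule that `get_expected_values` above encodes (pure Python; the image dimensions are illustrative):

h, w, edge = 480, 640, 18          # landscape image, shortest_edge=18
if w < h:
    expected = (int(edge * h / w), edge)
elif w > h:
    expected = (edge, int(edge * w / h))
else:
    expected = (edge, edge)
assert expected == (18, 24)        # height scales to 18, width to int(18 * 640 / 480)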
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)

MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MARIAN_MODEL,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
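        # A quick sanity sketch of that arithmetic (illustrative numbers, not measured values):
        #   adamw_state = 25e6 params * 8 bytes / 2**20 ~ 191MB
        #   bnb_state   = 25e6 params * 2 bytes / 2**20 ~  48MB
        #   expected saving ~ 143MB, hence the conservative 120MB floor used below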
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
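            # i.e. the test launches: python -m torch.distributed.run --nproc_per_node=N --master_port=P run_translation.py <args>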
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _a :
def __init__( self : Optional[int], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : Optional[Any]=1_3, lowerCAmelCase__ : List[str]=6_4, lowerCAmelCase__ : Optional[int]=2, lowerCAmelCase__ : Optional[int]=3, lowerCAmelCase__ : List[Any]=True, lowerCAmelCase__ : str=True, lowerCAmelCase__ : str=3_2, lowerCAmelCase__ : Union[str, Any]=5, lowerCAmelCase__ : Optional[Any]=4, lowerCAmelCase__ : Tuple=3_7, lowerCAmelCase__ : str="gelu", lowerCAmelCase__ : Tuple=0.1, lowerCAmelCase__ : Tuple=0.1, lowerCAmelCase__ : Optional[int]=1_0, lowerCAmelCase__ : Union[str, Any]=0.02, lowerCAmelCase__ : List[str]=[1, 1_6, 4, 4], lowerCAmelCase__ : Union[str, Any]=None, ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : Dict = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : int = type_sequence_label_size
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : Dict = scope
_UpperCamelCase : Optional[Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase : Dict = (self.image_size // 3_2) ** 2
_UpperCamelCase : List[str] = num_patches + 1
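        # e.g. with the defaults above: (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5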
def snake_case ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : List[str] = None
if self.use_labels:
_UpperCamelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase__, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=lowerCAmelCase__, )
def snake_case ( self : List[Any], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : Any ) -> int:
'''simple docstring'''
_UpperCamelCase : List[str] = ViTHybridModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Tuple, lowerCAmelCase__ : Tuple, lowerCAmelCase__ : str, lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.type_sequence_label_size
_UpperCamelCase : str = ViTHybridForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Dict = model(lowerCAmelCase__, labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = ViTHybridModelTester(self )
_UpperCamelCase : List[Any] = ConfigTester(self, config_class=lowerCAmelCase__, has_text_modality=lowerCAmelCase__, hidden_size=3_7 )
def snake_case ( self : Dict ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def snake_case ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[str] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__, nn.Linear ) )
def snake_case ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(lowerCAmelCase__ )
_UpperCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCAmelCase__ )
def snake_case ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case ( self : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def snake_case ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Union[str, Any] = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(config=lowerCAmelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase : List[Any] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
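                    # the mean is rounded to 9 decimal places so float noise still compares equal to the 0.0 / 1.0 init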
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )
@slow
def snake_case ( self : Tuple ) -> List[str]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : str = ViTHybridModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def snake_case ( self : List[str] ) -> Dict:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase__ )
_UpperCamelCase : List[str] = self.default_image_processor
_UpperCamelCase : Optional[Any] = prepare_img()
_UpperCamelCase : List[Any] = image_processor(images=lowerCAmelCase__, return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase : int = model(**lowerCAmelCase__ )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase__ )
_UpperCamelCase : Any = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase__, atol=1e-4 ) )
@slow
@require_accelerate
def snake_case ( self : Any ) -> Any:
'''simple docstring'''
_UpperCamelCase : List[str] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
_UpperCamelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Any = image_processor(images=lowerCAmelCase__, return_tensors='''pt''' )
_UpperCamelCase : List[str] = model(**lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase : List[str] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''' )
| 354 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
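# A minimal closed-form variant (added for illustration; `solution_closed_form` is a
# hypothetical helper, not part of the original file), using the series identities
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6:
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares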
if __name__ == "__main__":
print(F"{solution() = }")
| 128 | 0 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path ) -> Any:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 265 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> Any:
    raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module ):
    def __init__( self ) -> Any:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ) -> Any:
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A ):
nonlocal batch_sizes
batch_sizes.append(A )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
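        # each fake OOM halves the batch size, so the decorator retries 128 -> 64 -> 32 -> 16 -> 8 (first success)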
self.assertListEqual(A , [128, 64, 32, 16, 8] )
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A , A ):
nonlocal batch_sizes
batch_sizes.append(A )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""" )
self.assertListEqual(A , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _lowercase( self ) -> Any:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(A ):
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A , A , A ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(A ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _lowercase( self ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated()
UpperCAmelCase : List[str] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , A )
UpperCAmelCase : Tuple = release_memory(A )
self.assertEqual(torch.cuda.memory_allocated() , A )
| 265 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 358 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
snake_case_ : List[Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Dict = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : List[Any] = "sgugger/tiny-distilbert-classification"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
snake_case_ : int = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
snake_case_ : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[str] = "patrickvonplaten/t5-tiny-random"
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) ).exists() )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : int = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "sequential" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "cumulative" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "current" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) ).exists() )
| 36 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self , A ) -> Optional[int]:
return FSMTTokenizer.from_pretrained(A )
def snake_case_( self , A ) -> str:
_SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration.from_pretrained(A ).to(A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def snake_case_( self , A , A ) -> List[Any]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
_SCREAMING_SNAKE_CASE = f'facebook/wmt19-{pair}'
_SCREAMING_SNAKE_CASE = self.get_tokenizer(A )
_SCREAMING_SNAKE_CASE = self.get_model(A )
_SCREAMING_SNAKE_CASE = bleu_data[pair]["""src"""]
_SCREAMING_SNAKE_CASE = bleu_data[pair]["""tgt"""]
_SCREAMING_SNAKE_CASE = tokenizer(A , return_tensors="""pt""" , truncation=A , padding="""longest""" ).to(A )
_SCREAMING_SNAKE_CASE = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A )
_SCREAMING_SNAKE_CASE = calculate_bleu(A , A )
print(A )
self.assertGreaterEqual(scores["""bleu"""] , A )
| 58 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ) -> List[str]:
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class a ( unittest.TestCase ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=400 , __SCREAMING_SNAKE_CASE : int=2000 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Dict=16000 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=80 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=64 , __SCREAMING_SNAKE_CASE : Tuple="hann_window" , __SCREAMING_SNAKE_CASE : Dict=80 , __SCREAMING_SNAKE_CASE : List[str]=7600 , __SCREAMING_SNAKE_CASE : List[str]=1e-1_0 , __SCREAMING_SNAKE_CASE : Any=True , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
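        # with the defaults (min 400, max 2000, batch 7): (2000 - 400) // 6 = 266, so sample lengths step up by 266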
lowerCamelCase_ = feature_size
lowerCamelCase_ = padding_value
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = do_normalize
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = hop_length
lowerCamelCase_ = win_length
lowerCamelCase_ = win_function
lowerCamelCase_ = fmin
lowerCamelCase_ = fmax
lowerCamelCase_ = mel_floor
lowerCamelCase_ = return_attention_mask
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> str:
def _flatten(__SCREAMING_SNAKE_CASE : Any ):
return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) )
if equal_length:
lowerCamelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Any=False ) -> int:
if equal_length:
lowerCamelCase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Any = SpeechTaFeatureExtractor
def UpperCamelCase ( self : Any ) -> Any:
lowerCamelCase_ = SpeechTaFeatureExtractionTester(self )
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
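        # do_normalize should have produced (approximately) zero-mean, unit-variance values per feature dimension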
self.assertTrue(np.all(np.mean(__SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCamelCase ( self : List[Any] ) -> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase ( self : Dict ) -> Dict:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = range(800 , 1400 , 200 )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1000 , padding='max_length' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase ( self : List[str] ) -> List[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=2000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase ( self : List[Any] ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ = feature_extractor(audio_target=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def UpperCamelCase ( self : int ) -> Union[str, Any]:
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase ( self : Tuple ) -> Any:
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCamelCase ( self : Dict ) -> Union[str, Any]:
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = min(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = feat_extract.num_mel_bins # hack!
lowerCamelCase_ = feat_extract.pad(
__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='np' )
self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
from datasets import load_dataset
lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCamelCase_ = ds.sort('id' ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self : str ) -> Dict:
# fmt: off
lowerCamelCase_ = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , __SCREAMING_SNAKE_CASE , atol=1e-6 ) )
def UpperCamelCase ( self : List[Any] ) -> Optional[int]:
# fmt: off
lowerCamelCase_ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = feature_extractor(audio_target=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 183 | 0 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class Direction(enum.Enum ):
    UP = 0
    DOWN = 1
def forceWrite( content , end="" ) -> None:
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor( content , color , end="" ) -> None:
    forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , end )
def reset_cursor() -> None:
    forceWrite("\r" )
def move_cursor( num_lines , direction ) -> None:
    # emits an ANSI CSI sequence, e.g. "\033[2A" moves the cursor up two lines
    forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def clear_line() -> None:
    forceWrite(" " * TERMINAL_WIDTH )
    reset_cursor()
def linebreak() -> None:
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH )
| 199 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class a__ ( lowerCamelCase_ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead." , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 199 | 1 |
"""simple docstring"""
def check_cycle(graph: dict ) -> bool:
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
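# A small usage sketch (hypothetical graphs, not part of the original module):
#   check_cycle({0: [1], 1: [2], 2: [3], 3: [4], 4: [0]})  # -> True, the edge 4 -> 0 closes a cycle
#   check_cycle({0: [1], 1: [2], 2: []})                   # -> False, no back edge is ever found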
if __name__ == "__main__":
from doctest import testmod
testmod()
| 109 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCamelCase_ = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( __a : Union[str, Any] , __a : Any , __a : Union[str, Any]=None ):
'''simple docstring'''
if rng is None:
UpperCamelCase__ = random.Random()
UpperCamelCase__ = 1
for dim in shape:
total_dims *= dim
UpperCamelCase__ = []
for _ in range(__a ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCamelCase__ = np.array(__a , dtype=jnp.intaa ).reshape(__a )
return output
def __magic_name__ ( __a : Dict , __a : Tuple=None ):
'''simple docstring'''
UpperCamelCase__ = ids_tensor(__a , vocab_size=2 , rng=__a )
# make sure that at least one token is attended to for each batch
UpperCamelCase__ = 1
return attn_mask
@require_flax
class __A:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = ()
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCamelCase__ = 2
UpperCamelCase__ = inputs["""input_ids"""].shape[-1] // 2
UpperCamelCase__ = inputs["""input_ids"""][:max_batch_size, :sequence_length]
UpperCamelCase__ = jnp.ones_like(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCamelCase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCamelCase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 0
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = pt_model_class(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , flax_model.params )
UpperCamelCase__ = flax_model.generate(SCREAMING_SNAKE_CASE_ ).sequences
UpperCamelCase__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = True
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
UpperCamelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = True
UpperCamelCase__ = max_length
UpperCamelCase__ = 0.8
UpperCamelCase__ = 10
UpperCamelCase__ = 0.3
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = max_length
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = False
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = True
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = 2
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
UpperCamelCase__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase__ = """Hello world"""
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , """do_samples""" ):
model.generate(SCREAMING_SNAKE_CASE_ , do_samples=SCREAMING_SNAKE_CASE_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , """foo""" ):
UpperCamelCase__ = {"""foo""": """bar"""}
model.generate(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 244 | 0 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
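# Worked example: a 90-degree arc of a circle of radius 10 covers a quarter of
# the circumference, so arc_length(90, 10) == 2 * pi * 10 * (90 / 360) ≈ 15.708.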
| 0 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('The given input must be positive')
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
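# The reflect-and-prefix recursion above matches the closed-form Gray-code map
# g(i) = i XOR (i >> 1); a compact equivalent for comparison (sketch):
def gray_code_direct(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]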
| 0 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
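# Quick usage sketch for the converters above (assumes torch is available and
# a float NCHW tensor scaled to [-1, 1], which is what the rescaling implies):
#     import torch
#     fake = torch.rand(1, 3, 64, 64) * 2 - 1
#     pil_list = pt_to_pil(fake)   # -> [PIL.Image.Image] of size 64x64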
| 193 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = (DPMSolverSinglestepScheduler,)
__SCREAMING_SNAKE_CASE = (('''num_inference_steps''', 25),)
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**__lowerCamelCase )
return config
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ = sample, sample
for t in range(__lowerCamelCase,time_step + scheduler.config.solver_order + 1 ):
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self,__lowerCamelCase=None,**__lowerCamelCase ):
if scheduler is None:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
return sample
def UpperCamelCase ( self ):
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = 50
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCamelCase ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = self.full_loop(scheduler=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
A__ = DEISMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ = UniPCMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ = self.full_loop(scheduler=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCamelCase ( self ):
self.check_over_configs(thresholding=__lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCamelCase,prediction_type=__lowerCamelCase,sample_max_value=__lowerCamelCase,algorithm_type='''dpmsolver++''',solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,)
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def UpperCamelCase ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,prediction_type=__lowerCamelCase,algorithm_type=__lowerCamelCase,)
A__ = self.full_loop(
solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,prediction_type=__lowerCamelCase,algorithm_type=__lowerCamelCase,)
assert not torch.isnan(__lowerCamelCase ).any(), "Samples have nan numbers"
def UpperCamelCase ( self ):
self.check_over_configs(lower_order_final=__lowerCamelCase )
self.check_over_configs(lower_order_final=__lowerCamelCase )
def UpperCamelCase ( self ):
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCamelCase ( self ):
self.check_over_configs(variance_type=__lowerCamelCase )
self.check_over_configs(variance_type='''learned_range''' )
def UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCamelCase,time_step=0 )
def UpperCamelCase ( self ):
A__ = self.full_loop()
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(use_karras_sigmas=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''' )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''',use_karras_sigmas=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(thresholding=__lowerCamelCase,dynamic_thresholding_ratio=0 )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
assert sample.dtype == torch.floataa
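# The full-loop helpers above follow the standard diffusers sampling pattern;
# a minimal standalone sketch of that loop (assumes `model` is any callable
# returning a noise residual for (sample, t)):
#     scheduler = DPMSolverSinglestepScheduler()
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample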
| 193 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Any = 'roc_bert'
def __init__( self , _UpperCamelCase=30522 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=3072 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=2 , _UpperCamelCase=0.0_2 , _UpperCamelCase=1E-1_2 , _UpperCamelCase=True , _UpperCamelCase=0 , _UpperCamelCase="absolute" , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=768 , _UpperCamelCase=910 , _UpperCamelCase=512 , _UpperCamelCase=24858 , _UpperCamelCase=True , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : str = vocab_size
_lowercase : List[str] = max_position_embeddings
_lowercase : List[Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Dict = initializer_range
_lowercase : List[Any] = type_vocab_size
_lowercase : Tuple = layer_norm_eps
_lowercase : Optional[int] = use_cache
_lowercase : Tuple = enable_pronunciation
_lowercase : Optional[int] = enable_shape
_lowercase : int = pronunciation_embed_dim
_lowercase : List[str] = pronunciation_vocab_size
_lowercase : int = shape_embed_dim
_lowercase : str = shape_vocab_size
_lowercase : str = concat_input
_lowercase : Dict = position_embedding_type
_lowercase : Optional[Any] = classifier_dropout
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
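# A hedged instantiation sketch for the config above (the 'roc_bert' model
# type indicates it mirrors transformers' RoCBertConfig; argument values here
# are illustrative, not defaults pulled from a checkpoint):
#     config = RoCBertConfig(vocab_size=30522, enable_shape=True,
#                            enable_pronunciation=True, concat_input=True)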
| 199 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Any = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE : Dict = 'BlipImageProcessor'
_SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase )
# add QFormer tokenizer
_lowercase : List[Any] = qformer_tokenizer
def __call__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_lowercase : str = BatchFeature()
if text is not None:
_lowercase : Dict = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
encoding.update(_UpperCamelCase )
_lowercase : Dict = self.qformer_tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
_lowercase : Union[str, Any] = qformer_text_encoding.pop("input_ids" )
_lowercase : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_lowercase : List[Any] = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
if os.path.isfile(_UpperCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
_lowercase : Union[str, Any] = os.path.join(_UpperCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(_UpperCamelCase )
return super().save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def _lowerCamelCase ( cls , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
_lowercase : List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase , subfolder="qformer_tokenizer" )
_lowercase : Optional[Any] = cls._get_arguments_from_pretrained(_UpperCamelCase , **_UpperCamelCase )
args.append(_UpperCamelCase )
return cls(*_UpperCamelCase )
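# Typical call pattern for this dual-tokenizer processor (sketch; it mirrors
# the InstructBLIP-style processors upstream, and the variable names are
# assumed): the same text is tokenized twice, once for the language model and
# once for the Q-Former, alongside the image features.
#     inputs = processor(images=image, text='describe the image',
#                        return_tensors='pt')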
| 199 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCAmelCase :Any = logging.get_logger(__name__)
lowerCAmelCase :Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase :Dict = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase :Union[str, Any] = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
lowerCAmelCase :Tuple = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = VOCAB_FILES_NAMES
A_ : Dict = PRETRAINED_VOCAB_FILES_MAP
A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : int = PRETRAINED_INIT_CONFIGURATION
A_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
A_ : Union[str, Any] = DistilBertTokenizer
def __init__( self : Union[str, Any] , _A : Optional[Any]=None , _A : Optional[int]=None , _A : Tuple=True , _A : Dict="[UNK]" , _A : List[str]="[SEP]" , _A : Optional[int]="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : Dict="[MASK]" , _A : int=True , _A : Optional[Any]=None , **_A : List[str] , ) -> Dict:
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__magic_name__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
__magic_name__ : str = getattr(_A , normalizer_state.pop('type' ) )
__magic_name__ : Any = do_lower_case
__magic_name__ : Optional[Any] = strip_accents
__magic_name__ : Optional[int] = tokenize_chinese_chars
__magic_name__ : Dict = normalizer_class(**_A )
__magic_name__ : List[str] = do_lower_case
def __lowerCAmelCase ( self : Dict , _A : Any , _A : Optional[Any]=None ) -> Optional[Any]:
__magic_name__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
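    # Layout the two methods above produce for a sentence pair (sketch):
    #     tokens:   [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
    #     type ids:   0   0  ...  0   0    1  ...  1   1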
def __lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
__magic_name__ : Optional[int] = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )
| 331 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict(
            {'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]}
        )
        return dset
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
__magic_name__ : Union[str, Any] = dset.map(
lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A )
__magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__magic_name__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : int = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__magic_name__ : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_A )
__magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
__magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__magic_name__ : str = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Optional[int] = 1
__magic_name__ , __magic_name__ : str = index.search(_A )
self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
__magic_name__ , __magic_name__ : str = index.search_batch(_A )
self.assertRaises(_A , index.search_batch , queries[0] )
__magic_name__ : List[Any] = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
__magic_name__ : str = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__magic_name__ : str = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_A ):
__magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
__magic_name__ : Any = faiss.IndexFlat(5 )
__magic_name__ : Optional[Any] = FaissIndex(custom_index=_A )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
__magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
__magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ : Dict = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Tuple = 1
__magic_name__ , __magic_name__ : Optional[Any] = index.search(_A )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
import faiss
__magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__magic_name__ : Dict = 'index.faiss'
__magic_name__ : Optional[Any] = f'mock://{index_name}'
index.save(lowerCAmelCase , storage_options=mockfs.storage_options )
__magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options )
__magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa )
__magic_name__ : List[str] = 1
__magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
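# Minimal raw-FAISS sketch mirroring what these wrappers exercise (assumes
# `faiss-cpu` and numpy; the calls below are the standard faiss API):
#     import faiss, numpy as np
#     index = faiss.IndexFlatIP(5)                    # inner-product flat index
#     index.add(np.eye(5, dtype=np.float32))
#     scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 1)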
@require_elasticsearch
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : Any = Elasticsearch()
__magic_name__ : Union[str, Any] = {'acknowledged': True}
__magic_name__ : Tuple = ElasticSearchIndex(es_client=_A )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__magic_name__ : str = 'foo'
__magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__magic_name__ : str = 'foo'
__magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A )
__magic_name__ : Tuple = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
# batched queries with timeout
__magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Dict = index.search_batch(_A , request_timeout=30 )
__magic_name__ : Optional[int] = [scores[0] for scores in total_scores]
__magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
        self.assertListEqual([1, 1, 1] , _A )
| 331 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :List[str] = BlenderbotSmallConfig
_UpperCAmelCase :List[Any] = {}
_UpperCAmelCase :List[str] = "gelu"
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ):
lowercase__: Dict = parent
lowercase__: int = batch_size
lowercase__: Union[str, Any] = seq_length
lowercase__: List[str] = is_training
lowercase__: List[Any] = use_labels
lowercase__: Any = vocab_size
lowercase__: List[str] = hidden_size
lowercase__: Any = num_hidden_layers
lowercase__: List[Any] = num_attention_heads
lowercase__: Any = intermediate_size
lowercase__: List[str] = hidden_dropout_prob
lowercase__: str = attention_probs_dropout_prob
lowercase__: Optional[Any] = max_position_embeddings
lowercase__: Any = eos_token_id
lowercase__: str = pad_token_id
lowercase__: Union[str, Any] = bos_token_id
def _snake_case ( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase__: Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase__: Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase__: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase__: int = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder()
lowercase__: Optional[int] = inputs_dict['''input_ids''']
lowercase__: str = input_ids[:1, :]
lowercase__: List[Any] = inputs_dict['''attention_mask'''][:1, :]
lowercase__: Tuple = inputs_dict['''head_mask''']
lowercase__: List[str] = 1
# first forward pass
lowercase__: List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
lowercase__, lowercase__: List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase__: Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__: List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase__: Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase__: int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
lowercase__: List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase__: Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase__: Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowercase__: Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 )
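    # The check above validates key-value-cache decoding: rerunning the grown
    # sequence from scratch and continuing from `past_key_values` must agree
    # on the new positions (compared on a random output slice, rtol=1e-3).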
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> List[Any]:
if attention_mask is None:
lowercase__: List[Any] = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase__: Tuple = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase__: List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase__: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase__: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
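# e.g. (illustrative shapes): for input_ids [[5, 9, 2, 0]] with pad_token_id=0,
# the derived attention_mask above is [[1, 1, 1, 0]]; the decoder mask keeps
# the first position visible and masks padding elsewhere.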
@require_tf
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_UpperCAmelCase :str = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase :List[Any] = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase :List[str] = True
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :Dict = False
def _snake_case ( self ):
lowercase__: Optional[int] = TFBlenderbotSmallModelTester(self )
lowercase__: Tuple = ConfigTester(self , config_class=_UpperCAmelCase )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
_UpperCAmelCase :str = "facebook/blenderbot_small-90M"
@cached_property
def _snake_case ( self ):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def _snake_case ( self ):
lowercase__: str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def _snake_case ( self ):
lowercase__: Tuple = self.tokenizer(self.src_text , return_tensors='''tf''' )
lowercase__: Union[str, Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_UpperCAmelCase , )
lowercase__: Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 2 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline
_UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
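# The offloading test above relies on two memory levers that diffusers
# pipelines expose (sketch of typical use; the model id is illustrative):
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
#     pipe.enable_attention_slicing(1)        # trade speed for lower peak VRAM
#     pipe.enable_sequential_cpu_offload()    # move submodules to GPU on demand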
| 2 | 1 |